From 117d5356b3b85bee5528268ceae5ad7210363938 Mon Sep 17 00:00:00 2001 From: Pete Fotheringham Date: Wed, 11 May 2022 20:50:58 +0000 Subject: [PATCH] Revert "Merge branch 'lineage-17.1' into 'v1-q'" This reverts merge request !1 --- Documentation/ABI/testing/sysfs-block-zram | 118 +- Documentation/blockdev/zram.txt | 189 +- .../devicetree/bindings/mtd/gpmc-nand.txt | 2 +- .../devicetree/bindings/net/nfc/nxp-nci.txt | 2 +- .../devicetree/bindings/net/nfc/pn544.txt | 2 +- .../bindings/scheduler/sched_hmp.txt | 35 + Documentation/filesystems/sysfs.txt | 8 +- Documentation/networking/bonding.txt | 11 +- Documentation/scheduler/sched-hmp.txt | 1673 ++++++ Documentation/sysctl/kernel.txt | 21 - Documentation/vm/z3fold.txt | 26 + Makefile | 22 +- arch/alpha/include/asm/Kbuild | 1 - arch/alpha/include/asm/io.h | 6 +- arch/alpha/include/asm/uaccess.h | 76 +- arch/alpha/kernel/Makefile | 2 +- arch/alpha/kernel/alpha_ksyms.c | 102 + arch/alpha/kernel/machvec_impl.h | 6 +- arch/alpha/kernel/setup.c | 1 - arch/alpha/kernel/smp.c | 2 +- arch/alpha/lib/Makefile | 33 +- arch/alpha/lib/callback_srm.S | 5 - arch/alpha/lib/checksum.c | 3 - arch/alpha/lib/clear_page.S | 3 +- arch/alpha/lib/clear_user.S | 66 +- arch/alpha/lib/copy_page.S | 3 +- arch/alpha/lib/copy_user.S | 101 +- arch/alpha/lib/csum_ipv6_magic.S | 2 - arch/alpha/lib/csum_partial_copy.c | 2 - arch/alpha/lib/dec_and_lock.c | 2 - arch/alpha/lib/divide.S | 3 - arch/alpha/lib/ev6-clear_page.S | 3 +- arch/alpha/lib/ev6-clear_user.S | 85 +- arch/alpha/lib/ev6-copy_page.S | 3 +- arch/alpha/lib/ev6-copy_user.S | 130 +- arch/alpha/lib/ev6-csum_ipv6_magic.S | 2 - arch/alpha/lib/ev6-divide.S | 3 - arch/alpha/lib/ev6-memchr.S | 3 +- arch/alpha/lib/ev6-memcpy.S | 3 +- arch/alpha/lib/ev6-memset.S | 7 +- arch/alpha/lib/ev67-strcat.S | 3 +- arch/alpha/lib/ev67-strchr.S | 3 +- arch/alpha/lib/ev67-strlen.S | 3 +- arch/alpha/lib/ev67-strncat.S | 3 +- arch/alpha/lib/ev67-strrchr.S | 3 +- arch/alpha/lib/fpreg.c | 7 - arch/alpha/lib/memchr.S 
| 3 +- arch/alpha/lib/memcpy.c | 5 +- arch/alpha/lib/memmove.S | 3 +- arch/alpha/lib/memset.S | 7 +- arch/alpha/lib/strcat.S | 2 - arch/alpha/lib/strchr.S | 3 +- arch/alpha/lib/strcpy.S | 3 +- arch/alpha/lib/strlen.S | 3 +- arch/alpha/lib/strncat.S | 3 +- arch/alpha/lib/strncpy.S | 3 +- arch/alpha/lib/strrchr.S | 3 +- arch/arc/Kconfig | 2 +- arch/arc/Makefile | 1 - arch/arc/include/asm/page.h | 1 - arch/arc/kernel/entry.S | 4 +- arch/arc/kernel/signal.c | 4 +- arch/arc/kernel/stacktrace.c | 23 +- arch/arc/mm/cache.c | 2 +- arch/arm/Kconfig | 1 - arch/arm/Makefile | 24 +- arch/arm/boot/bootp/Makefile | 2 +- arch/arm/boot/compressed/Makefile | 4 +- arch/arm/boot/compressed/decompress.c | 3 - arch/arm/boot/compressed/head.S | 4 +- arch/arm/boot/dts/am43x-epos-evm.dts | 2 +- arch/arm/boot/dts/at91-sama5d3_xplained.dts | 7 - arch/arm/boot/dts/at91-sama5d4_xplained.dts | 7 - arch/arm/boot/dts/bcm5301x.dtsi | 2 - arch/arm/boot/dts/bcm63138.dtsi | 2 +- arch/arm/boot/dts/bcm7445-bcm97445svmb.dts | 4 +- arch/arm/boot/dts/bcm7445.dtsi | 2 +- arch/arm/boot/dts/bcm963138dvt.dts | 4 +- arch/arm/boot/dts/exynos5250-smdk5250.dts | 2 +- arch/arm/boot/dts/exynos5250-snow-common.dtsi | 2 +- arch/arm/boot/dts/exynos5250-spring.dts | 2 +- arch/arm/boot/dts/exynos5420-arndale-octa.dts | 2 +- arch/arm/boot/dts/exynos5422-odroidxu4.dts | 2 +- arch/arm/boot/dts/imx23-evk.dts | 1 + arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi | 5 +- arch/arm/boot/dts/imx6qdl-udoo.dtsi | 5 +- arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi | 2 +- .../boot/dts/omap3-overo-tobiduo-common.dtsi | 2 +- arch/arm/boot/dts/omap3.dtsi | 3 - arch/arm/boot/dts/omap4.dtsi | 5 - arch/arm/boot/dts/omap5.dtsi | 5 - arch/arm/boot/dts/picoxcell-pc3x2.dtsi | 4 - arch/arm/boot/dts/qcom/Makefile | 1 - arch/arm/boot/dts/qcom/dsi-panel-maple.dtsi | 245 + arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi | 12 +- arch/arm/boot/dts/qcom/msm-pm660.dtsi | 20 +- arch/arm/boot/dts/qcom/msm-pmi8998.dtsi | 19 +- arch/arm/boot/dts/qcom/msm8916.dtsi | 
4 +- arch/arm/boot/dts/qcom/msm8998-audio.dtsi | 14 +- .../qcom/msm8998-v2.1-yoshino-poplar_kddi.dts | 32 - arch/arm/boot/dts/qcom/msm8998-v2.dtsi | 15 +- .../boot/dts/qcom/msm8998-yoshino-common.dtsi | 19 +- .../qcom/msm8998-yoshino-lilac_common.dtsi | 8 + .../qcom/msm8998-yoshino-maple_common.dtsi | 7 + .../qcom/msm8998-yoshino-poplar_common.dtsi | 7 + .../msm8998-yoshino-poplar_jp-common.dtsi | 251 - .../dts/qcom/msm8998-yoshino-poplar_kddi.dtsi | 233 - arch/arm/boot/dts/qcom/msm8998.dtsi | 124 - .../dts/qcom/vplatform-lfv-msm8996-ivi-la.dts | 14 +- arch/arm/boot/dts/sama5d4.dtsi | 2 +- arch/arm/boot/dts/spear3xx.dtsi | 2 +- arch/arm/boot/dts/tegra20-tamonten.dtsi | 14 +- arch/arm/boot/dts/versatile-ab.dts | 5 +- arch/arm/boot/dts/versatile-pb.dts | 2 +- arch/arm/include/asm/tlb.h | 8 - arch/arm/kernel/Makefile | 6 +- arch/arm/kernel/entry-armv.S | 8 +- arch/arm/kernel/head.S | 6 +- arch/arm/kernel/return_address.c | 4 + arch/arm/kernel/setup.c | 16 +- arch/arm/kernel/signal.c | 14 +- arch/arm/kernel/stacktrace.c | 3 +- arch/arm/kvm/mmu.c | 4 +- arch/arm/mach-footbridge/dc21285.c | 12 +- arch/arm/mach-imx/pm-imx6.c | 2 - arch/arm/mach-imx/suspend-imx53.S | 4 +- arch/arm/mach-imx/suspend-imx6.S | 1 - arch/arm/mach-keystone/keystone.c | 4 +- arch/arm/mach-omap2/board-n8x0.c | 2 +- arch/arm/mach-socfpga/core.h | 2 +- arch/arm/mach-socfpga/platsmp.c | 8 +- arch/arm/mm/Kconfig | 2 +- arch/arm/mm/copypage-fa.c | 35 +- arch/arm/mm/copypage-feroceon.c | 98 +- arch/arm/mm/copypage-v4mc.c | 19 +- arch/arm/mm/copypage-v4wb.c | 41 +- arch/arm/mm/copypage-v4wt.c | 37 +- arch/arm/mm/copypage-xsc3.c | 71 +- arch/arm/mm/copypage-xscale.c | 71 +- arch/arm/mm/mmu.c | 2 - arch/arm/mm/proc-macros.S | 1 - arch/arm/probes/kprobes/core.c | 8 +- arch/arm/probes/kprobes/test-thumb.c | 10 +- arch/arm/probes/uprobes/core.c | 4 +- arch/arm/xen/p2m.c | 33 +- arch/arm64/Kconfig | 2 +- arch/arm64/Kconfig.platforms | 9 - arch/arm64/boot/dts/exynos/exynos7.dtsi | 2 +- 
arch/arm64/configs/cuttlefish_defconfig | 16 +- .../configs/diffconfig/poplar_kddi_diffconfig | 8 - .../lineage-msm8998-yoshino-lilac_defconfig | 148 +- .../lineage-msm8998-yoshino-maple_defconfig | 148 +- ...neage-msm8998-yoshino-maple_dsds_defconfig | 148 +- .../lineage-msm8998-yoshino-poplar_defconfig | 148 +- ...eage-msm8998-yoshino-poplar_dsds_defconfig | 148 +- ...eage-msm8998-yoshino-poplar_kddi_defconfig | 5303 ----------------- arch/arm64/configs/msmcortex-perf_defconfig | 3 +- arch/arm64/configs/msmcortex_defconfig | 3 +- arch/arm64/configs/sdm660-perf_defconfig | 3 +- arch/arm64/configs/sdm660_defconfig | 3 +- arch/arm64/include/asm/alternative.h | 2 +- arch/arm64/include/asm/assembler.h | 36 +- arch/arm64/include/asm/stacktrace.h | 2 +- arch/arm64/kernel/cpufeature.c | 2 +- arch/arm64/kernel/kaslr.c | 9 +- arch/arm64/kernel/stacktrace.c | 5 - arch/arm64/kernel/time.c | 2 +- arch/arm64/kernel/topology.c | 176 +- arch/arm64/kernel/traps.c | 8 + arch/arm64/kernel/vdso/vdso.lds.S | 8 +- arch/arm64/mm/mmap.c | 19 +- arch/arm64/mm/mmu.c | 11 +- arch/arm64/mm/proc.S | 4 +- arch/h8300/kernel/asm-offsets.c | 3 - arch/hexagon/kernel/vmlinux.lds.S | 7 +- arch/hexagon/lib/io.c | 4 - arch/ia64/Kconfig.debug | 2 +- arch/ia64/include/asm/ptrace.h | 8 +- arch/ia64/include/asm/syscall.h | 2 +- arch/ia64/include/asm/tlb.h | 10 - arch/ia64/kernel/mca.c | 2 +- arch/ia64/kernel/mca_drv.c | 2 +- arch/ia64/kernel/ptrace.c | 24 +- arch/ia64/mm/discontig.c | 6 +- arch/m68k/Kconfig.machine | 1 - arch/m68k/emu/nfeth.c | 4 +- arch/m68k/include/asm/raw_io.h | 20 +- arch/mips/Kconfig | 4 - arch/mips/Makefile | 2 +- arch/mips/alchemy/board-xxs1500.c | 1 - arch/mips/bcm47xx/Kconfig | 1 - arch/mips/bcm63xx/clk.c | 12 - arch/mips/boot/compressed/decompress.c | 2 - arch/mips/include/asm/hugetlb.h | 8 +- arch/mips/include/asm/octeon/cvmx-bootinfo.h | 4 +- arch/mips/kernel/vmlinux.lds.S | 1 - arch/mips/lantiq/clk.c | 6 - arch/mips/lantiq/irq.c | 2 +- arch/mips/lantiq/xway/dma.c | 14 +- 
arch/mips/lib/mips-atomic.c | 12 +- arch/mips/mm/c-r4k.c | 2 +- arch/mips/mti-malta/malta-platform.c | 3 +- arch/mips/ralink/of.c | 2 - arch/mips/sni/time.c | 4 +- arch/mips/vdso/vdso.h | 2 +- arch/nios2/include/asm/irqflags.h | 4 +- arch/nios2/include/asm/registers.h | 2 +- arch/nios2/platform/Kconfig.platform | 1 - arch/openrisc/include/asm/barrier.h | 9 - arch/openrisc/kernel/entry.S | 2 - arch/openrisc/kernel/setup.c | 2 - arch/parisc/include/asm/page.h | 2 +- arch/parisc/install.sh | 1 - arch/parisc/kernel/entry.S | 4 +- arch/parisc/kernel/signal.c | 6 - arch/parisc/kernel/smp.c | 19 +- arch/parisc/kernel/traps.c | 2 +- arch/parisc/kernel/unaligned.c | 14 +- arch/parisc/mm/init.c | 4 +- arch/powerpc/Kconfig | 2 +- arch/powerpc/Kconfig.debug | 1 - arch/powerpc/boot/crt0.S | 3 + arch/powerpc/boot/devtree.c | 59 +- arch/powerpc/boot/dts/charon.dts | 2 +- arch/powerpc/boot/dts/digsy_mtc.dts | 2 +- arch/powerpc/boot/dts/fsl/p1010si-post.dtsi | 8 - arch/powerpc/boot/dts/fsl/p2041si-post.dtsi | 16 - arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi | 2 - arch/powerpc/boot/dts/lite5200.dts | 2 +- arch/powerpc/boot/dts/lite5200b.dts | 2 +- arch/powerpc/boot/dts/media5200.dts | 2 +- arch/powerpc/boot/dts/mpc5200b.dtsi | 2 +- arch/powerpc/boot/dts/o2d.dts | 2 +- arch/powerpc/boot/dts/o2d.dtsi | 2 +- arch/powerpc/boot/dts/o2dnt2.dts | 2 +- arch/powerpc/boot/dts/o3dnt.dts | 2 +- arch/powerpc/boot/dts/pcm032.dts | 2 +- arch/powerpc/boot/dts/tqm5200.dts | 2 +- arch/powerpc/boot/ns16550.c | 9 +- arch/powerpc/include/asm/barrier.h | 2 - arch/powerpc/include/asm/code-patching.h | 2 +- arch/powerpc/include/asm/cputable.h | 5 + arch/powerpc/include/asm/dcr-native.h | 8 +- arch/powerpc/include/asm/pgtable.h | 4 +- arch/powerpc/include/asm/ps3.h | 2 - arch/powerpc/include/uapi/asm/errno.h | 1 - arch/powerpc/kernel/btext.c | 4 +- arch/powerpc/kernel/eeh.c | 11 +- arch/powerpc/kernel/iommu.c | 4 +- arch/powerpc/kernel/module_64.c | 2 +- arch/powerpc/kernel/prom.c | 2 +- 
arch/powerpc/kernel/prom_init.c | 2 +- arch/powerpc/kernel/smp.c | 2 - arch/powerpc/kvm/book3s_rtas.c | 25 +- arch/powerpc/lib/feature-fixups.c | 17 +- arch/powerpc/lib/ppc_ksyms.c | 1 + arch/powerpc/perf/core-book3s.c | 23 +- arch/powerpc/platforms/52xx/lite5200_sleep.S | 2 +- arch/powerpc/platforms/cell/iommu.c | 1 - arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 1 - arch/powerpc/platforms/powernv/opal-lpc.c | 1 - arch/powerpc/platforms/ps3/mm.c | 12 - arch/powerpc/platforms/pseries/dlpar.c | 7 +- arch/powerpc/platforms/pseries/pci_dlpar.c | 4 +- arch/powerpc/platforms/pseries/suspend.c | 1 + arch/powerpc/sysdev/dcr-low.S | 2 +- arch/powerpc/sysdev/mpic_msgr.c | 2 +- arch/s390/Kconfig | 2 +- arch/s390/hypfs/hypfs_vm.c | 6 +- arch/s390/include/asm/ftrace.h | 1 - arch/s390/include/asm/tlb.h | 13 - arch/s390/kernel/cpcmd.c | 6 +- arch/s390/kernel/dis.c | 4 +- arch/s390/kernel/entry.S | 1 - arch/s390/kernel/ftrace.c | 2 - arch/s390/kernel/jump_label.c | 2 +- arch/s390/kernel/mcount.S | 4 +- arch/s390/kernel/setup.c | 3 + arch/s390/kernel/smp.c | 2 +- arch/s390/kvm/gaccess.h | 23 +- arch/s390/lib/string.c | 15 +- arch/s390/net/bpf_jit_comp.c | 61 +- arch/sh/Kconfig.debug | 1 - arch/sh/drivers/dma/Kconfig | 3 +- arch/sh/include/asm/sfp-machine.h | 8 - arch/sh/include/asm/tlb.h | 10 - arch/sh/kernel/cpu/sh4a/smp-shx3.c | 5 +- arch/sparc/Kconfig | 2 +- arch/sparc/kernel/mdesc.c | 3 +- arch/sparc/lib/iomap.c | 2 - arch/sparc/lib/memset.S | 1 - arch/um/drivers/chan_user.c | 3 +- arch/um/drivers/slip_user.c | 3 +- arch/um/drivers/xterm.c | 5 - arch/um/include/asm/tlb.h | 12 - arch/um/include/shared/registers.h | 4 +- arch/um/kernel/dyn.lds.S | 6 - arch/um/kernel/uml.lds.S | 6 - arch/um/os-Linux/registers.c | 4 +- arch/um/os-Linux/start_up.c | 2 +- arch/x86/Makefile | 6 +- arch/x86/boot/compressed/Makefile | 2 - arch/x86/configs/x86_64_cuttlefish_defconfig | 9 +- arch/x86/entry/entry_64.S | 2 +- arch/x86/include/asm/apic.h | 10 + arch/x86/include/asm/atomic.h | 8 +- 
arch/x86/include/asm/atomic64_64.h | 8 +- arch/x86/include/asm/barrier.h | 18 - arch/x86/include/asm/bitops.h | 29 +- arch/x86/include/asm/fpu/internal.h | 30 +- arch/x86/include/asm/insn.h | 15 - arch/x86/include/asm/local.h | 8 +- arch/x86/include/asm/page_64_types.h | 2 +- arch/x86/include/asm/percpu.h | 2 +- arch/x86/include/asm/preempt.h | 2 +- arch/x86/include/asm/proto.h | 2 - arch/x86/include/asm/rmwcc.h | 4 +- arch/x86/include/asm/svm.h | 2 - arch/x86/include/asm/tlbflush.h | 11 +- arch/x86/kernel/apic/apic.c | 4 - arch/x86/kernel/apic/io_apic.c | 10 - arch/x86/kernel/apic/x2apic_cluster.c | 3 +- arch/x86/kernel/apic/x2apic_phys.c | 3 +- arch/x86/kernel/cpu/mtrr/generic.c | 6 +- arch/x86/kernel/cpu/perf_event.c | 1 - arch/x86/kernel/cpu/perf_event_amd_iommu.c | 6 +- arch/x86/kernel/crash.c | 3 +- arch/x86/kernel/fpu/signal.c | 18 +- arch/x86/kernel/fpu/xstate.c | 37 +- arch/x86/kernel/irq.c | 4 +- arch/x86/kernel/kprobes/core.c | 5 - arch/x86/kernel/module.c | 1 - arch/x86/kernel/reboot.c | 41 +- arch/x86/kernel/uprobes.c | 10 +- arch/x86/kvm/cpuid.c | 8 +- arch/x86/kvm/pmu.c | 2 +- arch/x86/kvm/pmu_intel.c | 2 +- arch/x86/kvm/svm.c | 14 +- arch/x86/kvm/x86.c | 21 +- arch/x86/lib/msr-smp.c | 4 +- arch/x86/mm/init_64.c | 6 +- arch/x86/mm/pgtable.c | 2 - arch/x86/net/bpf_jit_comp.c | 11 +- arch/x86/tools/chkobjdump.awk | 1 - arch/x86/tools/relocs.c | 12 +- arch/x86/um/syscalls_64.c | 3 +- arch/x86/xen/enlighten.c | 15 +- arch/x86/xen/p2m.c | 63 +- arch/xtensa/Kconfig | 2 +- arch/xtensa/kernel/irq.c | 2 +- arch/xtensa/platforms/iss/console.c | 17 +- arch/xtensa/platforms/iss/simdisk.c | 1 + block/blk-settings.c | 12 - block/blk-throttle.c | 1 - block/genhd.c | 9 +- block/ioprio.c | 3 - certs/Makefile | 8 - crypto/pcrypt.c | 12 +- crypto/shash.c | 18 +- drivers/acpi/acpi_pnp.c | 3 - drivers/acpi/acpica/acglobal.h | 2 - drivers/acpi/acpica/exoparg1.c | 3 +- drivers/acpi/acpica/hwesleep.c | 8 +- drivers/acpi/acpica/hwsleep.c | 11 +- 
drivers/acpi/acpica/hwxfsleep.c | 7 - drivers/acpi/acpica/utdelete.c | 1 - drivers/acpi/battery.c | 2 +- drivers/acpi/bus.c | 1 - drivers/acpi/custom_method.c | 4 +- drivers/acpi/device_sysfs.c | 22 +- drivers/acpi/processor_idle.c | 40 - drivers/acpi/resource.c | 2 +- drivers/acpi/scan.c | 2 - drivers/acpi/thermal.c | 54 +- drivers/amba/bus.c | 23 +- drivers/android/binder.c | 38 +- drivers/ata/ahci_sunxi.c | 2 +- drivers/ata/libahci_platform.c | 4 +- drivers/ata/libata-core.c | 39 +- drivers/ata/libata-eh.c | 8 - drivers/ata/pata_arasan_cf.c | 15 +- drivers/ata/pata_ep93xx.c | 2 +- drivers/ata/pata_hpt37x.c | 18 +- drivers/ata/pata_ixp4xx_cf.c | 6 +- drivers/ata/pata_legacy.c | 6 +- drivers/ata/pata_octeon_cf.c | 5 +- drivers/ata/pata_rb532_cf.c | 6 +- drivers/ata/sata_fsl.c | 20 +- drivers/ata/sata_highbank.c | 6 +- drivers/ata/sata_mv.c | 8 +- drivers/atm/eni.c | 3 +- drivers/atm/idt77105.c | 4 +- drivers/atm/idt77252.c | 2 +- drivers/atm/iphase.c | 2 +- drivers/atm/lanai.c | 5 +- drivers/atm/nicstar.c | 26 +- drivers/atm/uPD98402.c | 2 +- drivers/base/core.c | 3 +- drivers/base/cpu.c | 139 + drivers/base/power/wakeirq.c | 14 +- drivers/base/regmap/regcache-rbtree.c | 7 +- drivers/bcma/main.c | 6 +- drivers/block/Kconfig | 5 +- drivers/block/brd.c | 1 + drivers/block/cryptoloop.c | 2 - drivers/block/drbd/drbd_bitmap.c | 2 +- drivers/block/floppy.c | 41 +- drivers/block/rbd.c | 9 + drivers/block/rsxx/core.c | 9 +- drivers/block/virtio_blk.c | 2 - drivers/block/xen-blkback/blkback.c | 28 +- drivers/block/xen-blkback/xenbus.c | 4 +- drivers/block/xen-blkfront.c | 142 +- drivers/block/zram/Kconfig | 36 +- drivers/block/zram/Makefile | 4 +- drivers/block/zram/zcomp.c | 153 +- drivers/block/zram/zcomp.h | 36 +- drivers/block/zram/zcomp_lz4.c | 56 + drivers/block/zram/zcomp_lz4.h | 17 + drivers/block/zram/zcomp_lzo.c | 56 + drivers/block/zram/zcomp_lzo.h | 17 + drivers/block/zram/zram_drv.c | 1391 ++--- drivers/block/zram/zram_drv.h | 55 +- drivers/bluetooth/bfusb.c | 
3 - drivers/bluetooth/btusb.c | 37 +- drivers/bus/mips_cdmm.c | 4 +- drivers/bus/omap_l3_noc.c | 4 +- drivers/cdrom/gdrom.c | 13 +- drivers/char/adsprpc.c | 28 +- drivers/char/agp/Kconfig | 2 +- drivers/char/agp/parisc-agp.c | 6 +- drivers/char/diag/diag_dci.c | 8 +- drivers/char/hpet.c | 4 - drivers/char/ipmi/ipmi_watchdog.c | 22 +- drivers/char/mwave/3780i.h | 2 +- drivers/char/pcmcia/cm4000_cs.c | 4 - drivers/char/ttyprintk.c | 11 - drivers/char/virtio_console.c | 4 +- drivers/clk/clk-s2mps11.c | 1 - drivers/clk/clk.c | 30 +- drivers/clk/meson/clk-pll.c | 2 +- drivers/clk/msm/clock-gcc-8998.c | 14 + drivers/clk/msm/clock-osm.c | 45 +- drivers/clk/mvebu/kirkwood.c | 1 - drivers/clk/socfpga/clk-gate-a10.c | 1 - drivers/clk/socfpga/clk-gate.c | 2 +- drivers/clk/ti/fapll.c | 11 +- drivers/clocksource/arm_arch_timer.c | 23 +- drivers/clocksource/mxs_timer.c | 5 +- drivers/cpufreq/cpu-boost.c | 29 +- drivers/cpufreq/cpufreq_times.c | 34 +- drivers/cpufreq/highbank-cpufreq.c | 7 - drivers/cpufreq/intel_pstate.c | 1 - drivers/cpufreq/ls1x-cpufreq.c | 1 - drivers/cpufreq/powernow-k8.c | 9 +- drivers/cpufreq/scpi-cpufreq.c | 1 - drivers/cpuidle/lpm-levels.c | 105 + drivers/cpuidle/sysfs.c | 5 +- drivers/crypto/ixp4xx_crypto.c | 2 +- drivers/crypto/msm/qce.c | 7 +- drivers/crypto/msm/qce50.c | 4 +- drivers/crypto/mxs-dcp.c | 81 +- drivers/crypto/nx/nx-842-pseries.c | 9 +- drivers/crypto/omap-sham.c | 2 +- drivers/crypto/qat/qat_common/adf_init.c | 5 +- drivers/crypto/qat/qat_common/adf_pf2vf_msg.c | 9 - drivers/crypto/qat/qat_common/adf_transport.c | 1 - drivers/crypto/qat/qat_common/qat_hal.c | 6 +- drivers/crypto/qat/qat_common/qat_uclo.c | 1 + drivers/crypto/qat/qat_dh895xcc/adf_isr.c | 29 +- drivers/crypto/qat/qat_dh895xccvf/adf_isr.c | 17 +- drivers/crypto/qce/sha.c | 2 +- drivers/crypto/talitos.c | 6 +- drivers/crypto/ux500/hash/hash_core.c | 1 - drivers/devfreq/governor_msm_adreno_tz.c | 236 +- drivers/dma/Kconfig | 2 +- drivers/dma/acpi-dma.c | 11 +- 
drivers/dma/at_xdmac.c | 34 +- drivers/dma/dw/Kconfig | 2 - drivers/dma/fsldma.c | 6 - drivers/dma/mmp_pdma.c | 6 + drivers/dma/of-dma.c | 9 +- drivers/dma/pl330.c | 6 +- drivers/dma/pxa_dma.c | 7 + drivers/dma/sh/shdma-base.c | 4 +- drivers/dma/sh/usb-dmac.c | 2 +- drivers/dma/ste_dma40.c | 3 - drivers/edac/altera_edac.c | 2 +- drivers/edac/edac_mc.c | 2 +- drivers/edac/sb_edac.c | 2 +- drivers/edac/synopsys_edac.c | 2 +- drivers/edac/xgene_edac.c | 2 +- drivers/extcon/extcon-max77693.c | 2 +- drivers/extcon/extcon-max8997.c | 1 - drivers/extcon/extcon-sm5502.c | 1 + drivers/extcon/extcon.c | 1 - drivers/firewire/nosy.c | 9 +- drivers/firmware/efi/cper.c | 8 +- drivers/gpio/gpio-pcf857x.c | 2 +- drivers/gpio/gpio-zynq.c | 5 +- .../gpu/drm/amd/amdgpu/amdgpu_connectors.c | 7 - drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 3 +- drivers/gpu/drm/drm_edid.c | 3 +- drivers/gpu/drm/gma500/cdv_intel_dp.c | 2 +- drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c | 22 +- drivers/gpu/drm/gma500/psb_drv.c | 2 - drivers/gpu/drm/i915/i915_drv.h | 5 - drivers/gpu/drm/i915/i915_gem.c | 89 - drivers/gpu/drm/i915/i915_gem_gtt.c | 3 - drivers/gpu/drm/i915/i915_reg.h | 6 - drivers/gpu/drm/i915/intel_pm.c | 6 +- drivers/gpu/drm/imx/imx-ldb.c | 10 - drivers/gpu/drm/msm/dsi/dsi.c | 6 +- drivers/gpu/drm/msm/edp/edp_ctrl.c | 3 +- .../gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 10 +- drivers/gpu/drm/msm/msm_drv.c | 2 +- drivers/gpu/drm/nouveau/nouveau_bo.c | 4 +- .../gpu/drm/nouveau/nvkm/engine/device/ctrl.c | 2 +- .../gpu/drm/nouveau/nvkm/subdev/bios/shadow.c | 2 +- .../drm/nouveau/nvkm/subdev/i2c/auxgm204.c | 8 +- drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 1 - drivers/gpu/drm/qxl/qxl_dumb.c | 2 - drivers/gpu/drm/radeon/atombios_encoders.c | 3 +- drivers/gpu/drm/radeon/ci_dpm.c | 6 + drivers/gpu/drm/radeon/radeon_atombios.c | 6 +- drivers/gpu/drm/radeon/radeon_kms.c | 1 - drivers/gpu/drm/radeon/radeon_uvd.c | 4 +- drivers/gpu/drm/udl/udl_connector.c | 2 +- 
drivers/gpu/drm/virtio/virtgpu_kms.c | 1 - drivers/gpu/msm/adreno_a5xx_preempt.c | 5 +- drivers/gpu/msm/adreno_debugfs.c | 4 +- drivers/gpu/msm/adreno_dispatch.c | 2 +- drivers/gpu/msm/adreno_profile.c | 4 +- drivers/gpu/msm/kgsl.c | 132 +- drivers/gpu/msm/kgsl.h | 16 +- drivers/gpu/msm/kgsl_debugfs.c | 29 +- drivers/gpu/msm/kgsl_device.h | 8 +- drivers/gpu/msm/kgsl_drawobj.c | 35 +- drivers/gpu/msm/kgsl_iommu.c | 17 +- drivers/gpu/msm/kgsl_mmu.c | 13 +- drivers/gpu/msm/kgsl_sharedmem.c | 9 +- drivers/gpu/msm/kgsl_sharedmem.h | 3 +- drivers/gpu/msm/kgsl_trace.h | 16 +- drivers/hid/Kconfig | 12 +- drivers/hid/Makefile | 2 +- drivers/hid/hid-apple.c | 7 - drivers/hid/hid-betopff.c | 13 +- drivers/hid/hid-chicony.c | 8 +- drivers/hid/hid-core.c | 24 +- drivers/hid/hid-corsair.c | 7 +- drivers/hid/hid-debug.c | 4 +- drivers/hid/hid-elo.c | 3 - drivers/hid/hid-gt683r.c | 1 - drivers/hid/hid-holtek-kbd.c | 9 +- drivers/hid/hid-holtek-mouse.c | 24 - drivers/hid/hid-ids.h | 2 +- drivers/hid/hid-input.c | 199 +- drivers/hid/hid-lg.c | 10 +- drivers/hid/hid-plantronics.c | 60 +- drivers/hid/hid-prodikeys.c | 10 +- drivers/hid/hid-roccat-arvo.c | 3 - drivers/hid/hid-roccat-isku.c | 3 - drivers/hid/hid-roccat-kone.c | 3 - drivers/hid/hid-roccat-koneplus.c | 3 - drivers/hid/hid-roccat-konepure.c | 3 - drivers/hid/hid-roccat-kovaplus.c | 3 - drivers/hid/hid-roccat-lua.c | 3 - drivers/hid/hid-roccat-pyra.c | 3 - drivers/hid/hid-roccat-ryos.c | 3 - drivers/hid/hid-roccat-savu.c | 3 - drivers/hid/hid-samsung.c | 3 - drivers/hid/hid-sensor-hub.c | 13 +- drivers/hid/hid-steam.c | 4 - drivers/hid/hid-uclogic.c | 3 - drivers/hid/i2c-hid/i2c-hid.c | 3 +- drivers/hid/uhid.c | 32 +- drivers/hid/usbhid/hid-core.c | 18 +- drivers/hid/usbhid/hid-pidff.c | 1 - drivers/hid/wacom_sys.c | 17 +- drivers/hsi/hsi.c | 4 +- drivers/hv/hyperv_vmbus.h | 1 - drivers/hwmon/dell-smm-hwmon.c | 19 +- drivers/hwmon/lm80.c | 11 +- drivers/hwmon/lm90.c | 7 +- drivers/hwmon/pmbus/lm25066.c | 23 - 
drivers/hwtracing/intel_th/gth.c | 4 +- drivers/i2c/busses/Kconfig | 2 +- drivers/i2c/busses/i2c-bcm2835.c | 11 - drivers/i2c/busses/i2c-brcmstb.c | 2 +- drivers/i2c/busses/i2c-cadence.c | 5 +- drivers/i2c/busses/i2c-designware-pcidrv.c | 8 +- drivers/i2c/busses/i2c-highlander.c | 2 +- drivers/i2c/busses/i2c-i801.c | 21 +- drivers/i2c/busses/i2c-imx.c | 30 +- drivers/i2c/busses/i2c-iop3xx.c | 6 +- drivers/i2c/busses/i2c-jz4780.c | 5 +- drivers/i2c/busses/i2c-mpc.c | 118 +- drivers/i2c/busses/i2c-mt65xx.c | 2 +- drivers/i2c/busses/i2c-rk3x.c | 4 +- drivers/i2c/busses/i2c-robotfuzz-osif.c | 4 +- drivers/i2c/busses/i2c-s3c2410.c | 6 +- drivers/i2c/busses/i2c-sh7760.c | 5 +- drivers/i2c/i2c-dev.c | 5 +- drivers/ide/ide-cd.c | 8 +- drivers/ide/ide-cd.h | 6 +- drivers/iio/accel/bma180.c | 85 +- drivers/iio/accel/kxcjk-1013.c | 5 +- drivers/iio/accel/mma8452.c | 2 +- drivers/iio/accel/stk8312.c | 12 +- drivers/iio/accel/stk8ba50.c | 17 +- drivers/iio/adc/ad7793.c | 1 - drivers/iio/adc/men_z188_adc.c | 9 +- drivers/iio/adc/qcom-rradc.c | 136 +- drivers/iio/adc/rockchip_saradc.c | 2 +- drivers/iio/adc/ti-adc128s052.c | 6 - drivers/iio/common/ssp_sensors/ssp_spi.c | 11 +- drivers/iio/dac/ad5446.c | 9 +- drivers/iio/dac/ad5504.c | 4 +- drivers/iio/dac/ad5624r_spi.c | 18 +- drivers/iio/gyro/itg3200_buffer.c | 2 +- drivers/iio/imu/adis16400_buffer.c | 5 +- drivers/iio/imu/adis_buffer.c | 8 +- drivers/iio/industrialio-buffer.c | 6 +- drivers/iio/light/hid-sensor-prox.c | 14 +- drivers/iio/light/ltr501.c | 17 +- drivers/iio/light/stk3310.c | 6 +- drivers/iio/magnetometer/mag3110.c | 13 +- drivers/iio/pressure/mpl3115.c | 9 +- drivers/infiniband/core/cm.c | 2 - drivers/infiniband/core/cma.c | 3 +- drivers/infiniband/core/device.c | 3 +- drivers/infiniband/core/user_mad.c | 7 +- drivers/infiniband/hw/cxgb4/cm.c | 5 +- drivers/infiniband/hw/cxgb4/qp.c | 4 +- drivers/infiniband/hw/mlx4/qp.c | 4 +- drivers/infiniband/hw/mthca/mthca_cq.c | 2 +- drivers/infiniband/hw/mthca/mthca_dev.h | 
1 + drivers/infiniband/hw/qib/qib_user_sdma.c | 35 +- drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 3 - drivers/infiniband/ulp/srp/ib_srp.c | 6 +- drivers/input/ff-core.c | 13 +- drivers/input/input.c | 6 - drivers/input/joydev.c | 9 +- drivers/input/joystick/spaceball.c | 11 +- drivers/input/joystick/xpad.c | 22 +- drivers/input/keyboard/cros_ec_keyb.c | 1 - drivers/input/keyboard/hil_kbd.c | 1 - drivers/input/keyboard/nspire-keypad.c | 56 +- drivers/input/misc/cm109.c | 7 +- drivers/input/misc/uinput.c | 18 - drivers/input/mouse/appletouch.c | 4 +- drivers/input/mouse/cyapa_gen6.c | 2 +- drivers/input/mouse/elan_i2c_core.c | 64 +- drivers/input/mouse/elantech.c | 13 - drivers/input/serio/i8042-x86ia64io.h | 68 - drivers/input/serio/i8042.c | 3 +- drivers/input/touchscreen/ads7846.c | 8 +- drivers/input/touchscreen/elo.c | 4 +- drivers/input/touchscreen/goodix.c | 12 - drivers/input/touchscreen/of_touchscreen.c | 18 +- drivers/input/touchscreen/usbtouchscreen.c | 8 +- drivers/iommu/amd_iommu.c | 2 +- drivers/iommu/dmar.c | 43 +- drivers/iommu/intel_irq_remapping.c | 2 - drivers/ipack/carriers/tpci200.c | 33 +- drivers/ipack/devices/ipoctal.c | 63 +- drivers/irqchip/irq-gic-v3-its.c | 2 +- drivers/irqchip/irq-nvic.c | 2 +- drivers/irqchip/irq-s3c24xx.c | 22 +- drivers/isdn/capi/kcapi.c | 9 +- drivers/isdn/hardware/mISDN/hfcpci.c | 2 +- drivers/isdn/hardware/mISDN/mISDNinfineon.c | 24 +- drivers/isdn/hardware/mISDN/mISDNipac.c | 2 +- drivers/isdn/hardware/mISDN/netjet.c | 3 +- drivers/isdn/mISDN/Kconfig | 1 - drivers/isdn/mISDN/core.c | 6 +- drivers/isdn/mISDN/core.h | 4 +- drivers/isdn/mISDN/layer1.c | 4 +- drivers/leds/leds-lp5523.c | 2 +- drivers/leds/leds-qpnp-wled.c | 90 +- drivers/md/dm-era-target.c | 93 +- drivers/md/dm-ioctl.c | 3 +- drivers/md/dm-req-crypt.c | 2 - drivers/md/dm-snap.c | 3 +- drivers/md/dm-table.c | 104 +- drivers/md/dm-verity-fec.c | 8 +- drivers/md/dm-verity-target.c | 11 +- drivers/md/md.c | 35 +- 
.../md/persistent-data/dm-btree-internal.h | 4 +- drivers/md/persistent-data/dm-btree-remove.c | 5 +- drivers/md/persistent-data/dm-btree.c | 8 +- .../md/persistent-data/dm-space-map-common.c | 7 - .../md/persistent-data/dm-space-map-common.h | 8 +- .../md/persistent-data/dm-space-map-disk.c | 9 +- .../persistent-data/dm-space-map-metadata.c | 9 +- drivers/media/common/saa7146/saa7146_fops.c | 2 +- drivers/media/common/siano/smscoreapi.c | 22 +- drivers/media/common/siano/smscoreapi.h | 4 +- drivers/media/dvb-core/dvb_net.c | 25 +- drivers/media/dvb-frontends/dib8000.c | 4 +- drivers/media/dvb-frontends/sp8870.c | 4 +- drivers/media/i2c/adv7511-v4l2.c | 2 +- drivers/media/i2c/mt9p031.c | 28 +- drivers/media/i2c/s5c73m3/s5c73m3-core.c | 6 +- drivers/media/i2c/s5c73m3/s5c73m3.h | 2 +- drivers/media/i2c/s5k4ecgx.c | 10 +- drivers/media/i2c/s5k5baf.c | 6 +- drivers/media/i2c/s5k6aa.c | 10 +- drivers/media/i2c/tc358743.c | 1 - drivers/media/pci/b2c2/flexcop-pci.c | 3 - drivers/media/pci/bt8xx/bt878.c | 3 - drivers/media/pci/cx25821/cx25821-core.c | 4 +- .../pci/netup_unidvb/netup_unidvb_core.c | 27 +- .../media/pci/netup_unidvb/netup_unidvb_spi.c | 5 +- drivers/media/pci/ngene/ngene-core.c | 2 +- drivers/media/pci/ngene/ngene.h | 14 +- drivers/media/pci/saa7134/saa7134-empress.c | 5 +- drivers/media/pci/saa7146/hexium_gemini.c | 7 +- drivers/media/pci/saa7146/hexium_orion.c | 8 +- drivers/media/pci/saa7146/mxb.c | 27 +- drivers/media/pci/saa7164/saa7164-encoder.c | 20 +- drivers/media/pci/solo6x10/solo6x10-g723.c | 2 +- .../msm/camera_v2/isp/msm_isp_stats_util.c | 2 +- drivers/media/platform/s5p-g2d/g2d.c | 3 - drivers/media/platform/s5p-mfc/s5p_mfc.c | 2 +- drivers/media/platform/vivid/vivid-vid-out.c | 2 +- drivers/media/radio/si470x/radio-si470x-i2c.c | 2 +- drivers/media/radio/si470x/radio-si470x-usb.c | 2 +- drivers/media/rc/igorplugusb.c | 4 +- drivers/media/rc/ite-cir.c | 6 - drivers/media/rc/mceusb.c | 9 +- drivers/media/rc/rc-loopback.c | 2 +- 
drivers/media/rc/sunxi-cir.c | 2 - drivers/media/tuners/m88rs6000t.c | 6 +- drivers/media/tuners/msi001.c | 7 - drivers/media/usb/cpia2/cpia2.h | 1 - drivers/media/usb/cpia2/cpia2_core.c | 12 - drivers/media/usb/cpia2/cpia2_usb.c | 13 +- drivers/media/usb/dvb-usb-v2/lmedm04.c | 2 +- drivers/media/usb/dvb-usb-v2/rtl28xxu.c | 11 +- drivers/media/usb/dvb-usb/az6027.c | 1 - drivers/media/usb/dvb-usb/cxusb.c | 2 +- drivers/media/usb/dvb-usb/dib0700_core.c | 2 + drivers/media/usb/dvb-usb/dibusb-common.c | 2 +- drivers/media/usb/dvb-usb/dvb-usb-init.c | 20 +- drivers/media/usb/dvb-usb/dvb-usb.h | 3 +- drivers/media/usb/dvb-usb/gp8psk.c | 2 +- drivers/media/usb/dvb-usb/m920x.c | 12 +- drivers/media/usb/dvb-usb/nova-t-usb2.c | 6 +- drivers/media/usb/dvb-usb/vp702x.c | 12 +- drivers/media/usb/em28xx/em28xx-core.c | 4 +- drivers/media/usb/em28xx/em28xx-dvb.c | 1 - drivers/media/usb/go7007/go7007-driver.c | 26 + drivers/media/usb/gspca/gspca.c | 3 - drivers/media/usb/gspca/gspca.h | 1 - drivers/media/usb/gspca/m5602/m5602_po1030.c | 10 +- drivers/media/usb/gspca/sq905.c | 4 +- drivers/media/usb/gspca/stv06xx/stv06xx.c | 9 - drivers/media/usb/gspca/sunplus.c | 8 +- drivers/media/usb/hdpvr/hdpvr-core.c | 33 +- drivers/media/usb/msi2500/msi2500.c | 2 +- drivers/media/usb/pvrusb2/pvrusb2-hdw.c | 12 +- drivers/media/usb/stk1160/stk1160-core.c | 4 +- drivers/media/usb/stkwebcam/stk-webcam.c | 6 +- drivers/media/usb/tm6000/tm6000-dvb.c | 4 - drivers/media/usb/usbtv/usbtv-audio.c | 2 +- drivers/media/usb/uvc/uvc_driver.c | 7 +- drivers/media/usb/uvc/uvc_v4l2.c | 59 +- drivers/media/usb/uvc/uvc_video.c | 31 - drivers/media/usb/zr364xx/zr364xx.c | 1 - drivers/media/v4l2-core/v4l2-fh.c | 1 - drivers/media/v4l2-core/v4l2-ioctl.c | 19 +- drivers/media/v4l2-core/videobuf2-core.c | 13 +- drivers/memory/fsl_ifc.c | 17 +- drivers/memory/omap-gpmc.c | 7 +- drivers/memstick/core/memstick.c | 1 + drivers/memstick/core/ms_block.c | 2 +- drivers/memstick/host/jmb38x_ms.c | 2 +- 
drivers/memstick/host/r592.c | 20 +- drivers/mfd/da9052-i2c.c | 1 - drivers/mfd/intel-lpss-acpi.c | 7 +- drivers/mfd/stmpe-i2c.c | 2 +- drivers/mfd/wm831x-auxadc.c | 3 +- drivers/misc/Kconfig | 1 - drivers/misc/Makefile | 1 - drivers/misc/carillon/Kconfig | 5 - drivers/misc/carillon/Makefile | 2 - drivers/misc/carillon/bd7602/bd7602.c | 311 - drivers/misc/carillon/bd7602/bd7602.h | 18 - drivers/misc/carillon/cxd224x/cxd224x-i2c.c | 843 --- drivers/misc/carillon/cxd224x/cxd224x.h | 47 - drivers/misc/carillon/main_module.c | 54 - drivers/misc/cb710/sgbuf2.c | 2 +- drivers/misc/eeprom/eeprom_93xx46.c | 1 - drivers/misc/hdcp.c | 4 - drivers/misc/ibmasm/module.c | 5 +- drivers/misc/kgdbts.c | 27 +- drivers/misc/lattice-ecp3-config.c | 12 +- drivers/misc/lis3lv02d/lis3lv02d.c | 21 +- drivers/misc/lis3lv02d/lis3lv02d.h | 1 - drivers/misc/mei/interrupt.c | 3 - drivers/misc/qseecom.c | 129 +- drivers/misc/vmw_vmci/vmci_context.c | 2 +- drivers/misc/vmw_vmci/vmci_doorbell.c | 2 +- drivers/misc/vmw_vmci/vmci_guest.c | 2 +- drivers/misc/vmw_vmci/vmci_queue_pair.c | 11 +- drivers/mmc/core/mmc.c | 15 +- drivers/mmc/core/sd.c | 6 - drivers/mmc/core/sdio_cis.c | 6 - drivers/mmc/host/Kconfig | 2 +- drivers/mmc/host/dw_mmc-exynos.c | 14 - drivers/mmc/host/dw_mmc.c | 49 +- drivers/mmc/host/moxart-mmc.c | 3 +- drivers/mmc/host/mtk-sd.c | 18 +- drivers/mmc/host/mxs-mmc.c | 2 +- drivers/mmc/host/rtsx_pci_sdmmc.c | 36 +- drivers/mmc/host/sdhci-esdhc-imx.c | 3 +- drivers/mmc/host/sdhci.c | 10 - drivers/mmc/host/sdhci.h | 1 - drivers/mmc/host/usdhi6rol0.c | 5 +- drivers/mmc/host/via-sdmmc.c | 3 - drivers/mmc/host/vub300.c | 20 +- drivers/mtd/cmdlinepart.c | 14 +- drivers/mtd/mtdchar.c | 8 +- drivers/mtd/nand/brcmnand/brcmnand.c | 2 +- drivers/mtd/nand/cafe_nand.c | 4 +- drivers/mtd/nand/diskonchip.c | 7 +- drivers/mtd/nand/orion_nand.c | 2 +- drivers/mtd/nand/pasemi_nand.c | 4 +- drivers/mtd/nand/plat_nand.c | 2 +- drivers/mtd/nand/sharpsl.c | 2 +- drivers/mtd/nand/socrates_nand.c | 2 +- 
drivers/mtd/nand/tmio_nand.c | 2 +- drivers/net/appletalk/cops.c | 4 +- drivers/net/arcnet/com20020-pci.c | 3 - drivers/net/bonding/bond_3ad.c | 3 +- drivers/net/bonding/bond_main.c | 69 +- drivers/net/bonding/bond_options.c | 2 +- drivers/net/bonding/bond_sysfs_slave.c | 54 +- drivers/net/caif/caif_serial.c | 2 +- drivers/net/can/c_can/c_can.c | 24 +- drivers/net/can/c_can/c_can_pci.c | 3 +- drivers/net/can/c_can/c_can_platform.c | 6 +- drivers/net/can/dev.c | 7 +- drivers/net/can/flexcan.c | 20 +- drivers/net/can/m_can/m_can.c | 3 + drivers/net/can/pch_can.c | 2 +- drivers/net/can/rcar_can.c | 20 +- drivers/net/can/sja1000/ems_pcmcia.c | 7 +- drivers/net/can/sja1000/peak_pci.c | 9 +- drivers/net/can/softing/softing_cs.c | 2 +- drivers/net/can/softing/softing_fw.c | 11 +- drivers/net/can/softing/softing_main.c | 9 +- drivers/net/can/usb/ems_usb.c | 17 +- drivers/net/can/usb/esd_usb2.c | 20 +- drivers/net/can/usb/gs_usb.c | 15 +- drivers/net/can/usb/kvaser_usb.c | 41 +- drivers/net/can/usb/peak_usb/pcan_usb_core.c | 6 +- drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 5 +- drivers/net/can/usb/usb_8dev.c | 15 +- drivers/net/can/xilinx_can.c | 7 +- drivers/net/dsa/bcm_sf2.c | 6 +- drivers/net/ethernet/Kconfig | 1 - drivers/net/ethernet/aeroflex/greth.c | 3 +- drivers/net/ethernet/allwinner/sun4i-emac.c | 7 +- drivers/net/ethernet/altera/altera_tse_main.c | 9 +- drivers/net/ethernet/amd/pcnet32.c | 5 +- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 14 +- drivers/net/ethernet/arc/Kconfig | 1 - drivers/net/ethernet/broadcom/bcmsysport.c | 5 - drivers/net/ethernet/broadcom/bcmsysport.h | 1 - drivers/net/ethernet/broadcom/bnx2.c | 2 +- .../net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 3 +- .../ethernet/broadcom/bnx2x/bnx2x_init_ops.h | 4 +- .../net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 4 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 +- .../net/ethernet/broadcom/genet/bcmgenet.c | 35 +- .../ethernet/broadcom/genet/bcmgenet_wol.c | 6 + drivers/net/ethernet/cadence/macb.c | 3 - 
.../ethernet/cavium/liquidio/cn66xx_regs.h | 2 +- .../ethernet/cavium/thunder/nicvf_queues.c | 2 +- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 1 - drivers/net/ethernet/chelsio/cxgb3/sge.c | 1 - drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | 2 - .../ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 2 +- drivers/net/ethernet/davicom/dm9000.c | 27 +- drivers/net/ethernet/dec/tulip/de4x5.c | 34 +- drivers/net/ethernet/dec/tulip/winbond-840.c | 7 +- drivers/net/ethernet/ec_bhf.c | 4 +- drivers/net/ethernet/emulex/benet/be_main.c | 1 - drivers/net/ethernet/ezchip/nps_enet.c | 4 +- drivers/net/ethernet/freescale/fec.h | 3 - drivers/net/ethernet/freescale/fec_main.c | 2 +- drivers/net/ethernet/freescale/fec_ptp.c | 11 - .../ethernet/freescale/fs_enet/mii-bitbang.c | 1 - .../net/ethernet/freescale/fs_enet/mii-fec.c | 1 - drivers/net/ethernet/freescale/gianfar.c | 6 +- drivers/net/ethernet/freescale/ucc_geth.c | 2 +- drivers/net/ethernet/freescale/ucc_geth.h | 9 +- drivers/net/ethernet/freescale/xgmac_mdio.c | 3 +- drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 5 - .../net/ethernet/hisilicon/hns/hns_ethtool.c | 4 - drivers/net/ethernet/i825xx/82596.c | 2 +- drivers/net/ethernet/i825xx/sni_82596.c | 3 +- drivers/net/ethernet/ibm/ehea/ehea_main.c | 9 +- drivers/net/ethernet/intel/e100.c | 34 +- drivers/net/ethernet/intel/e1000e/82571.c | 2 - drivers/net/ethernet/intel/e1000e/ich8lan.c | 14 +- drivers/net/ethernet/intel/e1000e/ich8lan.h | 3 - drivers/net/ethernet/intel/e1000e/netdev.c | 6 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 42 +- .../net/ethernet/intel/i40evf/i40evf_main.c | 1 - drivers/net/ethernet/intel/igb/igb_main.c | 11 +- drivers/net/ethernet/intel/igbvf/netdev.c | 1 - drivers/net/ethernet/korina.c | 2 +- drivers/net/ethernet/marvell/mvneta.c | 2 +- drivers/net/ethernet/marvell/pxa168_eth.c | 2 +- .../net/ethernet/mellanox/mlx4/en_ethtool.c | 6 +- .../net/ethernet/mellanox/mlx4/en_netdev.c | 22 +- drivers/net/ethernet/mellanox/mlx4/main.c | 1 - 
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 8 +- drivers/net/ethernet/mellanox/mlx4/port.c | 107 +- drivers/net/ethernet/micrel/ks8842.c | 4 - .../ethernet/microchip/encx24j600-regmap.c | 10 +- drivers/net/ethernet/microchip/encx24j600.c | 5 +- .../net/ethernet/microchip/encx24j600_hw.h | 4 +- drivers/net/ethernet/moxa/moxart_ether.c | 4 +- .../net/ethernet/myricom/myri10ge/myri10ge.c | 1 - drivers/net/ethernet/natsemi/natsemi.c | 8 +- drivers/net/ethernet/natsemi/xtsonic.c | 2 +- drivers/net/ethernet/neterion/s2io.c | 2 +- .../net/ethernet/neterion/vxge/vxge-main.c | 6 +- .../ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 29 +- drivers/net/ethernet/pasemi/pasemi_mac.c | 8 +- .../ethernet/qlogic/netxen/netxen_nic_main.c | 9 +- drivers/net/ethernet/qlogic/qede/qede_main.c | 2 +- drivers/net/ethernet/qlogic/qla3xxx.c | 27 +- .../ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 14 +- .../ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 3 +- .../net/ethernet/qlogic/qlcnic/qlcnic_init.c | 1 + .../net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2 - .../ethernet/qlogic/qlcnic/qlcnic_minidump.c | 3 - .../net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 2 +- .../qlogic/qlcnic/qlcnic_sriov_common.c | 12 +- .../ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 4 +- drivers/net/ethernet/rdc/r6040.c | 9 +- drivers/net/ethernet/realtek/r8169.c | 2 +- drivers/net/ethernet/renesas/sh_eth.c | 7 +- .../net/ethernet/samsung/sxgbe/sxgbe_main.c | 6 +- drivers/net/ethernet/sfc/ef10_sriov.c | 25 +- drivers/net/ethernet/sis/sis900.c | 7 +- drivers/net/ethernet/smsc/smc911x.c | 5 - .../ethernet/stmicro/stmmac/dwmac-ipq806x.c | 2 - .../net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 8 +- .../net/ethernet/stmicro/stmmac/dwmac1000.h | 8 +- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 19 +- drivers/net/ethernet/sun/niu.c | 35 +- drivers/net/ethernet/tehuti/tehuti.c | 1 - drivers/net/ethernet/ti/davinci_emac.c | 20 +- drivers/net/ethernet/ti/netcp_core.c | 2 +- drivers/net/ethernet/ti/tlan.c | 3 +- 
drivers/net/ethernet/xilinx/ll_temac_main.c | 3 - .../net/ethernet/xilinx/xilinx_axienet_main.c | 4 +- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 5 +- drivers/net/fddi/Kconfig | 15 +- drivers/net/fddi/defxx.c | 47 +- drivers/net/hamradio/6pack.c | 10 +- drivers/net/hamradio/mkiss.c | 8 +- drivers/net/ieee802154/atusb.c | 11 +- drivers/net/phy/dp83640_reg.h | 2 +- drivers/net/phy/mdio_bus.c | 12 +- drivers/net/ppp/ppp_generic.c | 7 +- drivers/net/tun.c | 53 - drivers/net/usb/Kconfig | 4 - drivers/net/usb/ax88179_178a.c | 72 +- drivers/net/usb/cdc-phonet.c | 2 - drivers/net/usb/cdc_eem.c | 2 +- drivers/net/usb/cdc_ether.c | 12 - drivers/net/usb/cdc_ncm.c | 15 +- drivers/net/usb/hso.c | 96 +- drivers/net/usb/ipheth.c | 8 +- drivers/net/usb/lan78xx.c | 18 +- drivers/net/usb/mcs7830.c | 12 +- drivers/net/usb/pegasus.c | 14 +- drivers/net/usb/qmi_wwan.c | 2 - drivers/net/usb/r8152.c | 5 +- drivers/net/usb/rndis_host.c | 7 +- drivers/net/usb/smsc75xx.c | 12 +- drivers/net/usb/sr9700.c | 2 +- drivers/net/usb/usbnet.c | 5 - drivers/net/usb/zaurus.c | 12 - drivers/net/virtio_net.c | 12 +- drivers/net/vmxnet3/vmxnet3_drv.c | 1 + drivers/net/vxlan.c | 2 - drivers/net/wan/hdlc_ppp.c | 7 - drivers/net/wan/lapbether.c | 33 +- drivers/net/wan/lmc/lmc_main.c | 2 - drivers/net/wimax/i2400m/op-rfkill.c | 2 +- drivers/net/wireless/ath/ar5523/ar5523.c | 4 - drivers/net/wireless/ath/ath.h | 3 +- drivers/net/wireless/ath/ath10k/mac.c | 16 +- drivers/net/wireless/ath/ath5k/mac80211-ops.c | 2 +- drivers/net/wireless/ath/ath6kl/usb.c | 7 +- drivers/net/wireless/ath/ath6kl/wmi.c | 4 +- .../net/wireless/ath/ath9k/ar9003_eeprom.c | 3 +- drivers/net/wireless/ath/ath9k/ath9k.h | 3 +- drivers/net/wireless/ath/ath9k/hif_usb.c | 7 - drivers/net/wireless/ath/ath9k/htc_drv_init.c | 2 +- drivers/net/wireless/ath/ath9k/htc_drv_main.c | 2 +- drivers/net/wireless/ath/ath9k/hw.c | 2 +- drivers/net/wireless/ath/ath9k/hw.h | 1 - drivers/net/wireless/ath/ath9k/main.c | 104 +- 
drivers/net/wireless/ath/ath9k/xmit.c | 6 - drivers/net/wireless/ath/carl9170/Kconfig | 8 +- .../net/wireless/ath/dfs_pattern_detector.c | 10 +- drivers/net/wireless/ath/key.c | 41 +- drivers/net/wireless/ath/wcn36xx/main.c | 4 +- drivers/net/wireless/ath/wil6210/Kconfig | 1 - drivers/net/wireless/b43/phy_g.c | 2 +- drivers/net/wireless/b43/phy_n.c | 2 +- drivers/net/wireless/b43legacy/radio.c | 2 +- .../wireless/brcm80211/brcmsmac/mac80211_if.c | 8 +- drivers/net/wireless/cw1200/cw1200_sdio.c | 1 - drivers/net/wireless/cw1200/main.c | 2 - drivers/net/wireless/ipw2x00/libipw_wx.c | 6 +- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 17 - drivers/net/wireless/iwlwifi/mvm/ops.c | 3 +- drivers/net/wireless/iwlwifi/mvm/utils.c | 3 - drivers/net/wireless/iwlwifi/pcie/tx.c | 16 +- drivers/net/wireless/libertas/if_sdio.c | 5 - drivers/net/wireless/libertas/if_usb.c | 2 - drivers/net/wireless/libertas/mesh.c | 28 +- drivers/net/wireless/libertas_tf/if_usb.c | 2 - drivers/net/wireless/mac80211_hwsim.c | 13 - drivers/net/wireless/mediatek/mt7601u/dma.c | 5 +- .../net/wireless/mediatek/mt7601u/eeprom.c | 2 +- drivers/net/wireless/mwifiex/11n.c | 5 +- drivers/net/wireless/mwifiex/join.c | 2 - drivers/net/wireless/mwifiex/pcie.c | 8 - drivers/net/wireless/mwifiex/usb.c | 19 +- drivers/net/wireless/mwl8k.c | 3 +- drivers/net/wireless/orinoco/orinoco_usb.c | 14 +- drivers/net/wireless/realtek/rtlwifi/base.c | 19 +- .../wireless/realtek/rtlwifi/rtl8192cu/hw.c | 1 - drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +- drivers/net/wireless/ti/wl1251/cmd.c | 9 +- drivers/net/wireless/ti/wl12xx/main.c | 10 +- drivers/net/wireless/ti/wlcore/main.c | 15 +- drivers/net/wireless/ti/wlcore/wlcore.h | 3 + drivers/net/wireless/wl3501.h | 47 +- drivers/net/wireless/wl3501_cs.c | 54 +- drivers/net/xen-netback/interface.c | 9 +- drivers/net/xen-netback/netback.c | 25 +- drivers/net/xen-netback/xenbus.c | 14 +- drivers/net/xen-netfront.c | 380 +- drivers/nfc/pn533.c | 9 +- drivers/nfc/port100.c | 4 +- 
drivers/nfc/s3fwrn5/firmware.c | 4 +- drivers/nvdimm/dimm_devs.c | 18 +- drivers/nvdimm/nd.h | 1 + drivers/nvmem/core.c | 3 +- drivers/parisc/ccio-dma.c | 3 +- drivers/parisc/dino.c | 18 +- drivers/parisc/pdc_stable.c | 4 +- drivers/parisc/sba_iommu.c | 3 +- drivers/parport/ieee1284_ops.c | 2 +- drivers/pci/host/pci-xgene-msi.c | 10 +- drivers/pci/hotplug/acpiphp_glue.c | 1 - drivers/pci/hotplug/rpadlpar_sysfs.c | 14 +- drivers/pci/msi.c | 156 +- drivers/pci/pci-label.c | 2 +- drivers/pci/pci.c | 15 +- drivers/pci/probe.c | 1 - drivers/pci/quirks.c | 42 +- drivers/pci/slot.c | 6 +- drivers/pci/syscall.c | 14 +- drivers/pcmcia/cs.c | 8 +- drivers/pcmcia/i82092.c | 1 - drivers/pcmcia/rsrc_nonstatic.c | 6 - drivers/phy/phy-dm816x-usb.c | 17 +- drivers/phy/phy-twl4030-usb.c | 2 +- drivers/pinctrl/pinctrl-amd.c | 7 + drivers/pinctrl/pinctrl-falcon.c | 14 +- drivers/pinctrl/pinctrl-rockchip.c | 13 +- drivers/pinctrl/pinctrl-single.c | 1 - drivers/platform/chrome/cros_ec_dev.c | 4 - drivers/platform/chrome/cros_ec_proto.c | 13 +- drivers/platform/msm/gsi/gsi.c | 16 +- drivers/platform/msm/ipa/ipa_v2/ipa_flt.c | 7 +- drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 3 +- .../platform/msm/ipa/ipa_v2/ipa_qmi_service.c | 38 +- drivers/platform/msm/ipa/ipa_v2/ipa_rt.c | 7 +- drivers/platform/msm/ipa/ipa_v3/ipa_flt.c | 7 +- drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 4 +- drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 5 +- drivers/platform/x86/acer-wmi.c | 9 +- drivers/platform/x86/apple-gmux.c | 2 +- drivers/platform/x86/hp_accel.c | 24 +- drivers/platform/x86/intel_scu_ipc.c | 2 +- drivers/platform/x86/thinkpad_acpi.c | 45 +- drivers/platform/x86/toshiba_acpi.c | 1 - drivers/platform/x86/wmi.c | 9 +- drivers/power/ab8500_btemp.c | 1 - drivers/power/ab8500_charger.c | 19 +- drivers/power/ab8500_fg.c | 1 - drivers/power/bq25890_charger.c | 4 +- drivers/power/charger-manager.c | 1 - drivers/power/generic-adc-battery.c | 2 +- drivers/power/max17042_battery.c | 16 +- 
drivers/power/power_supply_sysfs.c | 1 - drivers/power/reset/gpio-poweroff.c | 1 - drivers/power/reset/ltc2952-poweroff.c | 4 +- drivers/power/rt5033_battery.c | 2 +- drivers/power/s3c_adc_battery.c | 2 +- drivers/power/supply/qcom/fg-core.h | 3 +- drivers/power/supply/qcom/fg-memif.c | 60 +- drivers/power/supply/qcom/fg-reg.h | 17 +- drivers/power/supply/qcom/fg-util.c | 6 +- drivers/power/supply/qcom/qpnp-fg-gen3.c | 105 - drivers/ps3/ps3stor_lib.c | 2 +- drivers/ptp/ptp_pch.c | 1 - drivers/pwm/pwm-spear.c | 4 + drivers/regulator/axp20x-regulator.c | 7 +- drivers/regulator/da9052-regulator.c | 3 +- drivers/rtc/rtc-cmos.c | 3 - drivers/rtc/rtc-proc.c | 4 +- drivers/rtc/rtc-tps65910.c | 2 +- drivers/s390/block/dasd.c | 3 +- drivers/s390/block/dasd_alias.c | 13 +- drivers/s390/cio/chp.c | 3 - drivers/s390/cio/chsc.c | 2 + drivers/s390/scsi/zfcp_fc.c | 13 +- drivers/scsi/BusLogic.c | 6 +- drivers/scsi/BusLogic.h | 2 +- drivers/scsi/FlashPoint.c | 32 +- drivers/scsi/advansys.c | 4 +- drivers/scsi/aic7xxx/aic7xxx_core.c | 2 +- drivers/scsi/be2iscsi/be_main.c | 1 - drivers/scsi/bnx2fc/Kconfig | 1 - drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 41 +- drivers/scsi/bnx2i/Kconfig | 1 - drivers/scsi/csiostor/csio_lnode.c | 2 +- drivers/scsi/dc395x.c | 1 - drivers/scsi/fnic/fnic_main.c | 1 - drivers/scsi/gdth.h | 3 + drivers/scsi/hosts.c | 10 +- drivers/scsi/ibmvscsi/ibmvfc.c | 4 +- drivers/scsi/jazz_esp.c | 4 +- drivers/scsi/libfc/fc_exch.c | 16 +- drivers/scsi/libfc/fc_lport.c | 2 +- drivers/scsi/libiscsi.c | 172 +- drivers/scsi/libsas/sas_ata.c | 9 +- drivers/scsi/libsas/sas_port.c | 4 +- drivers/scsi/lpfc/lpfc_debugfs.c | 4 +- drivers/scsi/lpfc/lpfc_els.c | 9 - drivers/scsi/lpfc/lpfc_nportdisc.c | 2 + drivers/scsi/lpfc/lpfc_sli.c | 2 +- drivers/scsi/megaraid/megaraid_mm.c | 21 +- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 6 +- drivers/scsi/pm8001/pm8001_init.c | 3 +- drivers/scsi/qla2xxx/qla_gbl.h | 2 + drivers/scsi/qla2xxx/qla_mr.c | 23 + drivers/scsi/qla2xxx/qla_nx.c | 3 +- 
drivers/scsi/qla2xxx/qla_os.c | 27 +- drivers/scsi/qla2xxx/qla_target.c | 2 - drivers/scsi/qla2xxx/qla_target.h | 2 +- drivers/scsi/qla2xxx/qla_tmpl.c | 9 +- drivers/scsi/qla2xxx/qla_tmpl.h | 2 +- drivers/scsi/scsi.c | 4 +- drivers/scsi/scsi_lib.c | 1 - drivers/scsi/scsi_scan.c | 3 +- drivers/scsi/scsi_sysfs.c | 9 - drivers/scsi/scsi_transport_iscsi.c | 152 +- drivers/scsi/sd.c | 9 +- drivers/scsi/ses.c | 2 +- drivers/scsi/sni_53c710.c | 5 +- drivers/scsi/sr.c | 4 +- drivers/scsi/sr_vendor.c | 4 +- drivers/scsi/st.c | 2 +- drivers/scsi/sun3x_esp.c | 4 +- drivers/scsi/ufs/ufs-qcom.c | 1 + drivers/scsi/ufs/ufshcd.c | 12 +- drivers/scsi/virtio_scsi.c | 4 +- drivers/scsi/vmw_pvscsi.c | 11 +- drivers/sh/maple/maple.c | 5 +- drivers/soc/qcom/msm_minidump.c | 15 +- drivers/soc/tegra/fuse/fuse-tegra.c | 2 +- drivers/soc/tegra/fuse/fuse.h | 2 +- drivers/soc/tegra/fuse/speedo-tegra210.c | 2 +- drivers/soc/ti/knav_dma.c | 13 +- drivers/soc/ti/knav_qmss_queue.c | 4 +- drivers/spi/Kconfig | 3 - drivers/spi/spi-bcm2835.c | 22 +- drivers/spi/spi-bcm2835aux.c | 17 +- drivers/spi/spi-cadence.c | 6 +- drivers/spi/spi-dln2.c | 2 +- drivers/spi/spi-img-spfi.c | 4 +- drivers/spi/spi-meson-spifc.c | 1 - drivers/spi/spi-mt65xx.c | 2 +- drivers/spi/spi-omap-100k.c | 8 +- drivers/spi/spi-pl022.c | 5 +- drivers/spi/spi-pxa2xx.c | 3 +- drivers/spi/spi-rb4xx.c | 2 +- drivers/spi/spi-s3c24xx-fiq.S | 9 +- drivers/spi/spi-sh.c | 14 +- drivers/spi/spi-sun6i.c | 6 +- drivers/spi/spi-tegra114.c | 2 - drivers/spi/spi-tegra20-sflash.c | 1 - drivers/spi/spi-tegra20-slink.c | 6 +- drivers/spi/spi-ti-qspi.c | 1 - drivers/spi/spi-topcliff-pch.c | 4 +- drivers/spi/spi.c | 70 +- drivers/ssb/sdio.c | 1 + drivers/staging/android/ion/ion.c | 6 - drivers/staging/android/vsoc.c | 3 +- .../staging/comedi/drivers/addi_apci_1032.c | 4 +- .../staging/comedi/drivers/addi_apci_1500.c | 18 +- drivers/staging/comedi/drivers/adv_pci1710.c | 10 +- drivers/staging/comedi/drivers/cb_pcidas.c | 2 +- 
drivers/staging/comedi/drivers/cb_pcidas64.c | 2 +- drivers/staging/comedi/drivers/das6402.c | 2 +- drivers/staging/comedi/drivers/das800.c | 2 +- drivers/staging/comedi/drivers/dmm32at.c | 2 +- drivers/staging/comedi/drivers/dt9812.c | 119 +- drivers/staging/comedi/drivers/me4000.c | 2 +- drivers/staging/comedi/drivers/mf6x4.c | 3 +- drivers/staging/comedi/drivers/ni_usb6501.c | 14 +- drivers/staging/comedi/drivers/pcl711.c | 2 +- drivers/staging/comedi/drivers/pcl818.c | 2 +- drivers/staging/comedi/drivers/vmk80xx.c | 34 +- drivers/staging/emxx_udc/emxx_udc.c | 4 +- drivers/staging/fbtft/fbtft.h | 5 +- drivers/staging/fwserial/fwserial.c | 2 - drivers/staging/gdm724x/gdm_lte.c | 20 +- drivers/staging/iio/cdc/ad7746.c | 1 + drivers/staging/iio/light/tsl2583.c | 9 - drivers/staging/media/omap4iss/iss.c | 4 +- drivers/staging/most/aim-sound/sound.c | 2 - .../dp/inc/cdp_txrx_peer_ops.h | 14 +- .../wmi/inc/wmi_unified_api.h | 6 +- .../qca-wifi-host-cmn/wmi/src/wmi_unified.c | 8 +- .../qcacld-3.0/core/dp/txrx/ol_rx_defrag.c | 36 +- .../qcacld-3.0/core/dp/txrx/ol_rx_fwd.c | 23 +- .../qcacld-3.0/core/dp/txrx/ol_rx_pn.c | 37 +- .../qcacld-3.0/core/dp/txrx/ol_rx_pn.h | 14 +- .../staging/qcacld-3.0/core/dp/txrx/ol_txrx.c | 40 +- .../qcacld-3.0/core/dp/txrx/ol_txrx_types.h | 4 +- .../core/hdd/src/wlan_hdd_cfg80211.c | 12 +- .../qcacld-3.0/core/hdd/src/wlan_hdd_ipa.c | 95 +- .../core/hdd/src/wlan_hdd_nan_datapath.h | 2 +- .../core/hdd/src/wlan_hdd_softap_tx_rx.c | 9 +- .../qcacld-3.0/core/mac/src/include/dot11f.h | 4 +- .../core/mac/src/pe/lim/lim_assoc_utils.c | 92 +- .../src/pe/lim/lim_process_assoc_req_frame.c | 28 +- .../core/mac/src/pe/lim/lim_process_fils.c | 7 +- .../src/pe/lim/lim_process_mlm_rsp_messages.c | 12 +- .../core/mac/src/pe/lim/lim_types.h | 19 +- .../qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c | 2 +- .../core/mac/src/pe/sch/sch_beacon_gen.c | 7 +- .../mac/src/sys/legacy/src/utils/src/dot11f.c | 22 +- drivers/staging/qcacld-3.0/core/wma/inc/wma.h | 2 - 
.../qcacld-3.0/core/wma/src/wma_mgmt.c | 18 +- .../core/wma/src/wma_nan_datapath.c | 6 - drivers/staging/rtl8188eu/core/rtw_ap.c | 5 - .../staging/rtl8188eu/os_dep/ioctl_linux.c | 6 +- drivers/staging/rtl8188eu/os_dep/usb_intf.c | 1 - drivers/staging/rtl8192e/rtl8192e/rtl_core.c | 3 +- drivers/staging/rtl8192e/rtl8192e/rtl_wx.c | 7 +- drivers/staging/rtl8192e/rtllib.h | 2 +- drivers/staging/rtl8192e/rtllib_rx.c | 2 +- drivers/staging/rtl8192u/r8192U_core.c | 20 +- drivers/staging/rtl8192u/r8192U_wx.c | 6 +- drivers/staging/rtl8712/rtl871x_cmd.c | 6 +- drivers/staging/rtl8712/rtl871x_ioctl_linux.c | 2 +- drivers/staging/rtl8712/usb_ops_linux.c | 2 +- drivers/staging/speakup/speakup_dectlk.c | 2 +- drivers/target/iscsi/iscsi_target_tpg.c | 3 - drivers/target/target_core_alua.c | 1 + drivers/target/target_core_device.c | 2 - drivers/target/target_core_internal.h | 1 - drivers/target/target_core_pscsi.c | 3 +- drivers/target/target_core_sbc.c | 35 +- drivers/target/target_core_transport.c | 104 +- drivers/target/target_core_xcopy.c | 220 +- drivers/target/target_core_xcopy.h | 1 - drivers/thermal/fair_share.c | 4 - drivers/thermal/msm_lmh_dcvs.c | 1 + drivers/thermal/samsung/exynos_tmu.c | 1 - drivers/thermal/thermal_core.c | 2 +- drivers/tty/hvc/hvc_xen.c | 47 +- drivers/tty/hvc/hvsi.c | 19 +- drivers/tty/n_gsm.c | 8 +- drivers/tty/n_tty.c | 4 +- drivers/tty/nozomi.c | 9 +- drivers/tty/serial/8250/8250_dw.c | 2 +- drivers/tty/serial/8250/8250_gsc.c | 2 +- drivers/tty/serial/8250/8250_omap.c | 5 + drivers/tty/serial/8250/8250_pci.c | 102 +- drivers/tty/serial/8250/8250_port.c | 15 +- drivers/tty/serial/8250/serial_cs.c | 12 +- drivers/tty/serial/amba-pl010.c | 3 + drivers/tty/serial/amba-pl011.c | 28 +- drivers/tty/serial/atmel_serial.c | 14 - drivers/tty/serial/fsl_lpuart.c | 3 - drivers/tty/serial/jsm/jsm_neo.c | 2 - drivers/tty/serial/jsm/jsm_tty.c | 3 - drivers/tty/serial/msm_serial.c | 3 - drivers/tty/serial/rp2.c | 52 +- drivers/tty/serial/serial_core.c | 3 +- 
drivers/tty/serial/stm32-usart.c | 2 +- drivers/tty/tty_buffer.c | 3 - drivers/tty/tty_io.c | 63 +- drivers/tty/vt/consolemap.c | 2 +- drivers/tty/vt/vt.c | 23 +- drivers/tty/vt/vt_ioctl.c | 22 +- drivers/usb/chipidea/ci_hdrc_imx.c | 9 +- drivers/usb/chipidea/core.c | 21 +- drivers/usb/class/cdc-acm.c | 36 +- drivers/usb/class/usblp.c | 40 +- drivers/usb/core/config.c | 2 +- drivers/usb/core/hcd.c | 23 +- drivers/usb/core/hub.c | 104 +- drivers/usb/core/hub.h | 6 +- drivers/usb/core/quirks.c | 10 - drivers/usb/core/urb.c | 12 - drivers/usb/dwc2/core.h | 2 - drivers/usb/dwc2/gadget.c | 13 +- drivers/usb/dwc2/hcd_intr.c | 14 +- drivers/usb/dwc3/ep0.c | 3 - drivers/usb/dwc3/gadget.c | 34 +- drivers/usb/dwc3/ulpi.c | 20 +- drivers/usb/gadget/Kconfig | 2 - drivers/usb/gadget/composite.c | 31 +- drivers/usb/gadget/configfs.c | 4 +- drivers/usb/gadget/function/f_accessory.c | 147 +- drivers/usb/gadget/function/f_eem.c | 47 +- drivers/usb/gadget/function/f_fs.c | 22 +- drivers/usb/gadget/function/f_hid.c | 2 +- drivers/usb/gadget/function/f_ncm.c | 2 +- drivers/usb/gadget/function/f_printer.c | 1 - drivers/usb/gadget/function/f_rndis.c | 4 +- drivers/usb/gadget/function/f_sourcesink.c | 1 - drivers/usb/gadget/function/f_uac2.c | 71 +- drivers/usb/gadget/function/f_uvc.c | 7 +- drivers/usb/gadget/function/rndis.c | 9 +- drivers/usb/gadget/function/u_ether.c | 5 +- drivers/usb/gadget/legacy/acm_ms.c | 4 +- drivers/usb/gadget/legacy/dbgp.c | 15 +- drivers/usb/gadget/legacy/ether.c | 4 +- drivers/usb/gadget/legacy/hid.c | 8 +- drivers/usb/gadget/legacy/inode.c | 26 +- drivers/usb/gadget/udc/Kconfig | 1 - drivers/usb/gadget/udc/at91_udc.c | 4 +- drivers/usb/gadget/udc/bdc/Kconfig | 2 +- drivers/usb/gadget/udc/dummy_hcd.c | 25 +- drivers/usb/gadget/udc/fotg210-udc.c | 26 +- drivers/usb/gadget/udc/mv_u3d_core.c | 19 +- drivers/usb/gadget/udc/pch_udc.c | 49 +- drivers/usb/gadget/udc/r8a66597-udc.c | 4 +- drivers/usb/gadget/udc/udc-core.c | 13 +- drivers/usb/gadget/udc/udc-xilinx.c 
| 6 - drivers/usb/gadget/usbstring.c | 4 +- drivers/usb/host/ehci-hcd.c | 12 - drivers/usb/host/ehci-omap.c | 1 - drivers/usb/host/ehci-orion.c | 8 +- drivers/usb/host/ehci-pci.c | 3 - drivers/usb/host/fotg210-hcd.c | 9 +- drivers/usb/host/fotg210.h | 5 + drivers/usb/host/max3421-hcd.c | 69 +- drivers/usb/host/ohci-hcd.c | 2 +- drivers/usb/host/ohci-tmio.c | 5 +- drivers/usb/host/oxu210hp-hcd.c | 4 +- drivers/usb/host/sl811-hcd.c | 9 +- drivers/usb/host/xhci-ext-caps.h | 5 +- drivers/usb/host/xhci-hub.c | 10 +- drivers/usb/host/xhci-pci.c | 5 - drivers/usb/host/xhci-ring.c | 2 - drivers/usb/host/xhci.c | 31 +- drivers/usb/misc/ftdi-elan.c | 1 - drivers/usb/misc/iowarrior.c | 8 +- drivers/usb/misc/sisusbvga/Kconfig | 2 +- drivers/usb/misc/trancevibrator.c | 4 +- drivers/usb/misc/uss720.c | 1 - drivers/usb/misc/yurex.c | 3 - drivers/usb/musb/tusb6010.c | 6 - drivers/usb/phy/phy-fsl-usb.c | 2 - drivers/usb/phy/phy-isp1301.c | 2 +- drivers/usb/phy/phy-tahvo.c | 4 +- drivers/usb/phy/phy-twl6030-usb.c | 5 - drivers/usb/renesas_usbhs/fifo.c | 9 - drivers/usb/renesas_usbhs/pipe.c | 2 - drivers/usb/serial/ch341.c | 6 +- drivers/usb/serial/cp210x.c | 11 - drivers/usb/serial/digi_acceleport.c | 45 +- drivers/usb/serial/ftdi_sio.c | 8 - drivers/usb/serial/ftdi_sio_ids.h | 14 - drivers/usb/serial/io_edgeport.c | 26 +- drivers/usb/serial/iuu_phoenix.c | 20 +- drivers/usb/serial/keyspan.c | 15 +- drivers/usb/serial/keyspan_pda.c | 65 +- drivers/usb/serial/kl5kusb105.c | 10 +- drivers/usb/serial/mos7720.c | 10 +- drivers/usb/serial/mos7840.c | 6 +- drivers/usb/serial/omninet.c | 2 - drivers/usb/serial/option.c | 89 +- drivers/usb/serial/pl2303.c | 1 - drivers/usb/serial/pl2303.h | 1 - drivers/usb/serial/qcserial.c | 1 - drivers/usb/serial/quatech2.c | 6 +- drivers/usb/storage/unusual_devs.h | 20 - drivers/usb/storage/unusual_uas.h | 14 - drivers/usb/usbip/stub_dev.c | 42 +- drivers/usb/usbip/vhci_sysfs.c | 10 +- drivers/vfio/pci/vfio_pci_config.c | 2 +- 
drivers/vfio/platform/vfio_platform_common.c | 2 +- drivers/vhost/net.c | 6 +- drivers/vhost/vringh.c | 2 +- drivers/video/backlight/lm3630a_bl.c | 12 +- drivers/video/console/fbcon.c | 2 +- drivers/video/console/sticon.c | 12 +- drivers/video/console/vgacon.c | 68 +- drivers/video/fbdev/Kconfig | 2 +- drivers/video/fbdev/asiliantfb.c | 3 - drivers/video/fbdev/chipsfb.c | 2 +- drivers/video/fbdev/core/fbcmap.c | 8 +- drivers/video/fbdev/core/fbmem.c | 11 - drivers/video/fbdev/hgafb.c | 21 +- drivers/video/fbdev/hyperv_fb.c | 6 +- drivers/video/fbdev/imsttfb.c | 5 + drivers/video/fbdev/kyro/fbdev.c | 8 - drivers/video/fbdev/msm/mdss_fb.c | 45 +- drivers/video/fbdev/msm/mdss_hdmi_cec.c | 10 +- drivers/video/fbdev/msm/mdss_hdmi_edid.c | 128 - drivers/video/fbdev/msm/mdss_hdmi_edid.h | 2 - drivers/video/fbdev/msm/mdss_hdmi_tx.c | 114 +- drivers/video/fbdev/riva/fbdev.c | 3 - drivers/virtio/virtio_ring.c | 6 +- drivers/w1/slaves/w1_ds28e04.c | 26 +- drivers/watchdog/f71808e_wdt.c | 4 +- drivers/watchdog/lpc18xx_wdt.c | 2 +- drivers/watchdog/sbc60xxwdt.c | 2 +- drivers/watchdog/sc520_wdt.c | 2 +- drivers/watchdog/w83877f_wdt.c | 2 +- drivers/xen/events/events_2l.c | 22 +- drivers/xen/events/events_base.c | 165 +- drivers/xen/events/events_fifo.c | 7 + drivers/xen/events/events_internal.h | 22 +- drivers/xen/gntdev.c | 33 +- .../xen/xen-pciback/conf_space_capability.c | 2 +- drivers/xen/xen-pciback/vpci.c | 14 +- drivers/xen/xen-pciback/xenbus.c | 24 +- drivers/xen/xen-scsiback.c | 4 +- drivers/xen/xenbus/xenbus_client.c | 8 +- drivers/xen/xenbus/xenbus_probe.c | 28 +- drivers/xen/xenbus/xenbus_probe.h | 2 - drivers/xen/xenbus/xenbus_probe_backend.c | 7 - drivers/xen/xenbus/xenbus_xs.c | 38 +- fs/Kconfig.binfmt | 8 - fs/btrfs/Kconfig | 2 - fs/btrfs/async-thread.c | 14 - fs/btrfs/backref.c | 21 +- fs/btrfs/compression.c | 2 +- fs/btrfs/ctree.c | 8 - fs/btrfs/file.c | 4 +- fs/btrfs/free-space-cache.c | 6 +- fs/btrfs/inode.c | 2 +- fs/btrfs/qgroup.c | 4 +- 
fs/btrfs/relocation.c | 10 +- fs/btrfs/scrub.c | 17 +- fs/btrfs/tests/btrfs-tests.c | 8 +- fs/btrfs/transaction.c | 6 +- fs/btrfs/tree-log.c | 19 +- fs/btrfs/volumes.c | 7 +- fs/ceph/addr.c | 10 +- fs/ceph/caps.c | 13 +- fs/cifs/cifs_unicode.c | 9 +- fs/cifs/cifsfs.c | 3 +- fs/cifs/connect.c | 7 +- fs/cifs/dir.c | 22 +- fs/cifs/file.c | 1 - fs/cifs/sess.c | 2 +- fs/cifs/smb2misc.c | 4 +- fs/cifs/smb2ops.c | 2 - fs/configfs/dir.c | 14 - fs/direct-io.c | 5 +- fs/dlm/debug_fs.c | 1 - fs/dlm/lock.c | 9 - fs/dlm/lowcomms.c | 2 +- fs/ecryptfs/crypto.c | 6 +- fs/ecryptfs/main.c | 6 - fs/exec.c | 2 +- fs/ext2/balloc.c | 14 +- fs/ext4/block_validity.c | 71 +- fs/ext4/dir.c | 6 +- fs/ext4/ext4.h | 6 +- fs/ext4/extents.c | 62 +- fs/ext4/extents_status.c | 4 +- fs/ext4/ialloc.c | 59 +- fs/ext4/indirect.c | 6 +- fs/ext4/inline.c | 16 +- fs/ext4/inode.c | 19 +- fs/ext4/ioctl.c | 5 +- fs/ext4/mballoc.c | 13 +- fs/ext4/migrate.c | 23 +- fs/ext4/namei.c | 59 +- fs/ext4/super.c | 13 +- fs/f2fs/file.c | 3 +- fs/file.c | 82 +- fs/file_table.c | 9 +- fs/fs-writeback.c | 36 +- fs/fuse/cuse.c | 2 - fs/fuse/dev.c | 21 +- fs/fuse/fuse_i.h | 2 +- fs/gfs2/glock.c | 2 +- fs/gfs2/lock_dlm.c | 13 +- fs/gfs2/rgrp.c | 4 - fs/hfs/bfind.c | 14 +- fs/hfs/bnode.c | 25 +- fs/hfs/btree.h | 7 - fs/hfs/super.c | 10 +- fs/hugetlbfs/inode.c | 7 +- fs/isofs/dir.c | 1 - fs/isofs/inode.c | 2 - fs/isofs/namei.c | 1 - fs/jffs2/compr_rtime.c | 3 - fs/jffs2/readinode.c | 16 - fs/jffs2/scan.c | 2 +- fs/jffs2/summary.c | 3 - fs/jfs/inode.c | 3 +- fs/jfs/jfs_dmap.c | 2 +- fs/jfs/jfs_dmap.h | 2 +- fs/jfs/jfs_filsys.h | 1 - fs/jfs/jfs_logmgr.c | 1 - fs/jfs/jfs_mount.c | 61 +- fs/lockd/host.c | 20 +- fs/namespace.c | 42 +- fs/nfs/Kconfig | 2 +- fs/nfs/client.c | 2 +- fs/nfs/filelayout/filelayout.c | 2 +- fs/nfs/flexfilelayout/flexfilelayout.c | 2 +- fs/nfs/inode.c | 10 +- fs/nfs/internal.h | 12 +- fs/nfs/nfs3proc.c | 4 +- fs/nfs/nfs3xdr.c | 3 +- fs/nfs/nfs42proc.c | 5 +- fs/nfs/nfs42xdr.c | 3 +- fs/nfs/nfs4client.c | 5 
+- fs/nfs/nfs4file.c | 2 +- fs/nfs/nfs4proc.c | 23 +- fs/nfs/nfs4state.c | 3 - fs/nfs/nfs4xdr.c | 9 +- fs/nfs/pagelist.c | 12 +- fs/nfs_common/grace.c | 6 +- fs/nfsd/nfs3proc.c | 5 - fs/nfsd/nfs3xdr.c | 7 +- fs/nfsd/nfs4proc.c | 5 +- fs/nfsd/nfs4state.c | 13 +- fs/nfsd/nfs4xdr.c | 19 +- fs/nfsd/nfsctl.c | 5 +- fs/nilfs2/sysfs.c | 27 +- fs/ntfs/inode.c | 8 +- fs/ocfs2/file.c | 82 +- fs/ocfs2/super.c | 14 +- fs/overlayfs/dir.c | 10 +- fs/pipe.c | 17 +- fs/posix_acl.c | 7 +- fs/proc/array.c | 189 +- fs/proc/base.c | 220 +- fs/proc/cpu_time_stat.c | 40 +- fs/proc/kcore.c | 18 +- fs/proc/stat.c | 49 +- fs/proc/vmcore.c | 15 +- fs/qnx4/dir.c | 69 +- fs/quota/dquot.c | 11 +- fs/quota/quota_tree.c | 23 +- fs/reiserfs/journal.c | 14 - fs/reiserfs/stree.c | 27 - fs/reiserfs/super.c | 8 - fs/reiserfs/xattr.h | 2 +- fs/sdcardfs/dentry.c | 10 +- fs/sdcardfs/file.c | 13 + fs/sdcardfs/inode.c | 13 +- fs/sdcardfs/lookup.c | 123 + fs/sdfat/Kconfig | 9 +- fs/sdfat/amap_smart.c | 3 +- fs/sdfat/blkdev.c | 30 +- fs/sdfat/core.c | 39 +- fs/sdfat/core.h | 10 +- fs/sdfat/core_exfat.c | 63 +- fs/sdfat/core_fat.c | 144 +- fs/sdfat/fatent.c | 7 + fs/sdfat/misc.c | 25 +- fs/sdfat/mpage.c | 177 +- fs/sdfat/sdfat.c | 204 +- fs/sdfat/sdfat.h | 36 +- fs/sdfat/sdfat_fs.h | 73 +- fs/sdfat/version.h | 2 +- fs/sdfat/xattr.c | 10 - fs/seq_file.c | 61 +- fs/signalfd.c | 12 +- fs/squashfs/export.c | 45 +- fs/squashfs/file.c | 6 +- fs/squashfs/id.c | 42 +- fs/squashfs/squashfs_fs.h | 1 - fs/squashfs/squashfs_fs_sb.h | 1 - fs/squashfs/super.c | 6 +- fs/squashfs/xattr.h | 10 +- fs/squashfs/xattr_id.c | 68 +- fs/sysfs/file.c | 55 - fs/timerfd.c | 10 - fs/tracefs/inode.c | 82 +- fs/ubifs/io.c | 13 +- fs/ubifs/super.c | 1 + fs/udf/inode.c | 9 +- fs/udf/misc.c | 13 +- fs/udf/namei.c | 4 - fs/xfs/xfs_ioctl.c | 3 +- fs/xfs/xfs_iops.c | 2 +- gen_headers_arm.bp | 3 - gen_headers_arm64.bp | 3 - include/asm-generic/tlb.h | 7 - include/asm-generic/vmlinux.lds.h | 6 +- include/crypto/internal/hash.h | 8 +- 
include/dt-bindings/clock/msm-clocks-8998.h | 1 + include/linux/acpi.h | 7 - include/linux/backing-dev.h | 10 - include/linux/blkdev.h | 42 +- include/linux/can/skb.h | 8 +- include/linux/compat.h | 2 + include/linux/compiler-clang.h | 14 - include/linux/compiler-gcc.h | 10 - include/linux/compiler-intel.h | 4 - include/linux/compiler.h | 2 - include/linux/console_struct.h | 1 - include/linux/cpufreq.h | 3 - include/linux/cred.h | 14 +- include/linux/device-mapper.h | 2 + include/linux/device.h | 1 - include/linux/elfcore.h | 22 - include/linux/file.h | 2 - include/linux/fs.h | 8 +- include/linux/ftrace.h | 4 +- include/linux/futex.h | 44 +- include/linux/hid.h | 23 +- include/linux/hugetlb.h | 5 +- include/linux/ide.h | 1 + include/linux/ieee80211.h | 10 - include/linux/if_macvlan.h | 3 +- include/linux/if_vlan.h | 29 +- include/linux/input.h | 1 - include/linux/intel-iommu.h | 2 - include/linux/ipc_namespace.h | 15 - include/linux/kprobes.h | 2 - include/linux/kref.h | 2 - include/linux/kvm_host.h | 11 +- include/linux/libata.h | 3 +- include/linux/lsm_hooks.h | 32 +- include/linux/mfd/abx500/ux500_chargalg.h | 2 +- include/linux/mfd/cros_ec.h | 6 +- include/linux/mfd/rt5033-private.h | 4 +- include/linux/msi.h | 2 +- include/linux/netdevice.h | 3 - include/linux/netfilter/x_tables.h | 11 +- include/linux/of.h | 1 - include/linux/pci.h | 5 +- include/linux/power/max17042_battery.h | 2 +- include/linux/power_supply.h | 1 - include/linux/prandom.h | 2 +- include/linux/rcupdate.h | 4 +- include/linux/sched.h | 231 +- include/linux/sched/sysctl.h | 61 +- include/linux/security.h | 28 +- include/linux/seq_buf.h | 2 +- include/linux/seq_file.h | 4 +- include/linux/shm.h | 13 +- include/linux/siphash.h | 14 +- include/linux/skbuff.h | 2 +- include/linux/spi/spi.h | 6 - include/linux/string.h | 34 - include/linux/sunrpc/xdr.h | 3 +- include/linux/sysfs.h | 16 - include/linux/trace_seq.h | 4 +- include/linux/tty.h | 4 - include/linux/tty_driver.h | 2 +- 
include/linux/u64_stats_sync.h | 7 +- include/linux/virtio_vsock.h | 3 +- include/linux/wait.h | 26 - include/linux/zpool.h | 12 + include/net/af_unix.h | 1 - include/net/bluetooth/hci_core.h | 2 - include/net/bonding.h | 8 - include/net/caif/caif_dev.h | 2 +- include/net/caif/cfcnfg.h | 2 +- include/net/caif/cfserl.h | 1 - include/net/checksum.h | 5 - include/net/dst_metadata.h | 5 +- include/net/inet_ecn.h | 1 - include/net/ip.h | 21 +- include/net/ip6_route.h | 2 +- include/net/llc.h | 4 +- include/net/llc_pdu.h | 31 +- include/net/netfilter/nf_nat_l4proto.h | 2 +- include/net/netfilter/nf_queue.h | 2 +- include/net/nfc/nci_core.h | 2 - include/net/nl802154.h | 7 +- include/net/red.h | 16 +- include/net/rtnetlink.h | 2 - include/net/sch_generic.h | 5 - include/net/sctp/constants.h | 4 +- include/net/sctp/structs.h | 2 +- include/net/sock.h | 12 +- include/scsi/libfcoe.h | 2 +- include/scsi/scsi_transport_iscsi.h | 2 - include/target/target_core_base.h | 10 +- include/trace/events/kmem.h | 55 + include/trace/events/sched.h | 787 ++- include/trace/events/writeback.h | 35 +- include/uapi/linux/const.h | 5 - include/uapi/linux/if_link.h | 1 - include/uapi/linux/input-event-codes.h | 3 +- include/uapi/linux/lightnvm.h | 2 +- include/uapi/linux/msdos_fs.h | 2 - .../uapi/linux/netfilter/nfnetlink_cthelper.h | 2 +- include/uapi/linux/netfilter/x_tables.h | 2 +- include/uapi/linux/netlink.h | 2 +- include/uapi/linux/nfc.h | 6 +- include/uapi/linux/pci_regs.h | 6 - include/uapi/linux/qbg-profile.h | 55 - include/uapi/linux/qbg.h | 179 - include/uapi/linux/serial_reg.h | 1 - include/uapi/linux/slatecom_interface.h | 93 - include/uapi/linux/sysctl.h | 2 +- include/uapi/linux/usb/ch9.h | 3 - include/xen/grant_table.h | 1 - include/xen/interface/io/ring.h | 257 +- include/xen/xenbus.h | 15 +- init/Kconfig | 39 +- init/main.c | 2 +- init/version.c | 4 +- ipc/shm.c | 176 +- kernel/Makefile | 6 + kernel/bpf/syscall.c | 3 +- kernel/cgroup.c | 4 - kernel/debug/kdb/kdb_private.h | 
2 +- kernel/elfcore.c | 25 + kernel/events/core.c | 2 - kernel/exit.c | 27 +- kernel/fork.c | 41 +- kernel/futex.c | 1167 +--- kernel/futex_compat.c | 201 + kernel/irq/manage.c | 4 - kernel/kexec_file.c | 4 +- kernel/kprobes.c | 7 - kernel/locking/rtmutex-debug.c | 9 + kernel/locking/rtmutex-debug.h | 3 + kernel/locking/rtmutex.c | 292 +- kernel/locking/rtmutex.h | 2 + kernel/locking/rtmutex_common.h | 14 +- kernel/module.c | 27 +- kernel/pid.c | 4 +- kernel/power/swap.c | 5 +- kernel/power/wakelock.c | 12 +- kernel/printk/printk.c | 9 +- kernel/profile.c | 21 +- kernel/ptrace.c | 18 +- kernel/sched/Makefile | 5 +- kernel/sched/boost.c | 235 + kernel/sched/core.c | 682 ++- kernel/sched/core_ctl.c | 1171 ++++ kernel/sched/cpufreq_schedutil.c | 52 +- kernel/sched/cputime.c | 17 +- kernel/sched/deadline.c | 44 +- kernel/sched/debug.c | 44 + kernel/sched/energy.c | 10 + kernel/sched/fair.c | 2525 ++++++-- kernel/sched/features.h | 7 - kernel/sched/hmp.c | 4496 ++++++++++++++ kernel/sched/idle_task.c | 25 + kernel/sched/rt.c | 192 +- kernel/sched/sched.h | 833 ++- kernel/sched/sched_avg.c | 199 + kernel/sched/stop_task.c | 45 +- kernel/sched/tune.c | 187 +- kernel/sched/wait.c | 8 - kernel/signal.c | 17 +- kernel/sys.c | 7 + kernel/sysctl.c | 262 +- kernel/time/timekeeping.c | 3 +- kernel/trace/blktrace.c | 8 - kernel/trace/ftrace.c | 19 +- kernel/trace/ring_buffer.c | 32 +- kernel/trace/trace.c | 80 +- kernel/trace/trace.h | 70 +- kernel/trace/trace_clock.c | 44 +- kernel/trace/trace_events.c | 10 +- kernel/trace/trace_functions.c | 2 +- kernel/tracepoint.c | 80 +- kernel/tsacct.c | 7 +- kernel/workqueue.c | 35 +- lib/decompress_unlz4.c | 8 - lib/decompress_unxz.c | 2 +- lib/genalloc.c | 25 +- lib/iov_iter.c | 2 +- lib/kobject_uevent.c | 9 +- lib/seq_buf.c | 8 +- lib/siphash.c | 12 +- lib/string.c | 113 +- lib/test_bpf.c | 15 +- lib/xz/xz_dec_lzma2.c | 21 +- lib/xz/xz_dec_stream.c | 6 +- localversion-st | 1 - mm/Kconfig | 12 +- mm/Makefile | 1 + mm/backing-dev.c | 8 - 
mm/gup.c | 48 +- mm/huge_memory.c | 18 +- mm/hugetlb.c | 148 +- mm/kmemleak.c | 2 +- mm/ksm.c | 1 - mm/memblock.c | 49 +- mm/memory.c | 20 +- mm/mmap.c | 3 + mm/oom_kill.c | 8 +- mm/page_alloc.c | 17 +- mm/page_io.c | 11 +- mm/slab.h | 2 +- mm/slub.c | 4 +- mm/swapfile.c | 2 +- mm/vmstat.c | 5 +- mm/z3fold.c | 1108 ++++ mm/zpool.c | 31 + mm/zsmalloc.c | 21 + net/802/garp.c | 14 - net/802/mrp.c | 14 - net/9p/trans_virtio.c | 4 +- net/Makefile | 2 +- net/appletalk/ddp.c | 33 +- net/ax25/af_ax25.c | 9 +- net/batman-adv/bat_iv_ogm.c | 4 +- net/batman-adv/bridge_loop_avoidance.c | 141 +- net/batman-adv/bridge_loop_avoidance.h | 4 +- net/batman-adv/debugfs.c | 1 - net/batman-adv/fragmentation.c | 41 +- net/batman-adv/hard-interface.c | 3 - net/batman-adv/main.c | 44 +- net/batman-adv/multicast.c | 31 - net/batman-adv/multicast.h | 15 - net/batman-adv/network-coding.c | 4 +- net/batman-adv/soft-interface.c | 31 +- net/batman-adv/translation-table.c | 11 +- net/bluetooth/a2mp.c | 3 +- net/bluetooth/amp.c | 3 - net/bluetooth/cmtp/cmtp.h | 2 +- net/bluetooth/cmtp/core.c | 9 +- net/bluetooth/hci_core.c | 57 +- net/bluetooth/hci_event.c | 43 +- net/bluetooth/hci_sock.c | 49 +- net/bluetooth/hci_sysfs.c | 3 - net/bluetooth/hidp/core.c | 5 +- net/bluetooth/l2cap_core.c | 4 - net/bluetooth/l2cap_sock.c | 10 +- net/bluetooth/mgmt.c | 11 +- net/bluetooth/sco.c | 35 +- net/bluetooth/smp.c | 9 - net/bridge/br_netfilter_hooks.c | 14 +- net/bridge/br_vlan.c | 4 +- net/bridge/netfilter/ebt_limit.c | 1 - net/caif/caif_dev.c | 13 +- net/caif/caif_socket.c | 3 +- net/caif/caif_usb.c | 14 +- net/caif/cfcnfg.c | 16 +- net/caif/cfserl.c | 5 - net/caif/chnl_net.c | 19 +- net/can/bcm.c | 91 +- net/can/gw.c | 3 - net/can/raw.c | 82 +- net/compat.c | 2 +- net/core/dev.c | 13 +- net/core/drop_monitor.c | 11 +- net/core/fib_rules.c | 2 +- net/core/neighbour.c | 4 +- net/core/net-procfs.c | 38 +- net/core/net_namespace.c | 4 +- net/core/pktgen.c | 2 +- net/core/rtnetlink.c | 10 +- net/core/skbuff.c 
| 40 +- net/core/sock.c | 12 +- net/core/stream.c | 3 + net/dcb/dcbnl.c | 46 - net/dccp/dccp.h | 6 +- net/dccp/ipv6.c | 5 - net/dccp/minisocks.c | 2 - net/decnet/af_decnet.c | 27 +- net/hsr/hsr_framereg.c | 3 +- net/ieee802154/nl-mac.c | 11 +- net/ieee802154/nl-phy.c | 4 +- net/ieee802154/nl802154.c | 60 +- net/ieee802154/socket.c | 7 +- net/ipv4/af_inet.c | 15 +- net/ipv4/cipso_ipv4.c | 1 - net/ipv4/devinet.c | 2 +- net/ipv4/fib_frontend.c | 2 +- net/ipv4/icmp.c | 23 +- net/ipv4/igmp.c | 2 - net/ipv4/ip_gre.c | 4 - net/ipv4/ip_output.c | 18 +- net/ipv4/ip_tunnel.c | 10 +- net/ipv4/ipconfig.c | 13 +- net/ipv4/ipmr.c | 2 - net/ipv4/netfilter/arp_tables.c | 2 - net/ipv4/netfilter/ip_tables.c | 23 +- net/ipv4/netfilter/ipt_CLUSTERIP.c | 1 - net/ipv4/netfilter/ipt_rpfilter.c | 2 +- net/ipv4/ping.c | 12 +- net/ipv4/raw.c | 5 +- net/ipv4/route.c | 88 +- net/ipv4/tcp_cubic.c | 5 +- net/ipv4/tcp_ipv4.c | 9 +- net/ipv4/tcp_output.c | 10 +- net/ipv4/udp.c | 2 +- net/ipv4/udp_offload.c | 2 +- net/ipv4/xfrm4_policy.c | 3 +- net/ipv6/addrconf.c | 44 +- net/ipv6/ip6_gre.c | 3 + net/ipv6/ip6_input.c | 10 + net/ipv6/ip6_offload.c | 2 - net/ipv6/ip6_output.c | 11 +- net/ipv6/ip6_tunnel.c | 11 +- net/ipv6/ip6_vti.c | 5 +- net/ipv6/ip6mr.c | 2 - net/ipv6/mcast.c | 3 + net/ipv6/netfilter/ip6_tables.c | 24 +- net/ipv6/netfilter/ip6t_NPT.c | 2 - net/ipv6/output_core.c | 28 +- net/ipv6/route.c | 48 +- net/ipv6/sit.c | 10 +- net/ipv6/tcp_ipv6.c | 24 +- net/ipv6/xfrm6_output.c | 2 +- net/iucv/af_iucv.c | 4 +- net/key/af_key.c | 6 +- net/l2tp/l2tp_core.c | 4 +- net/lapb/lapb_out.c | 3 +- net/llc/af_llc.c | 10 +- net/llc/llc_s_ac.c | 2 +- net/mac80211/agg-tx.c | 2 +- net/mac80211/cfg.c | 4 +- net/mac80211/driver-ops.c | 5 +- net/mac80211/ibss.c | 2 - net/mac80211/ieee80211_i.h | 37 +- net/mac80211/iface.c | 15 +- net/mac80211/key.c | 7 - net/mac80211/key.h | 2 - net/mac80211/main.c | 20 +- net/mac80211/mlme.c | 7 +- net/mac80211/rate.c | 3 +- net/mac80211/rx.c | 162 +- 
net/mac80211/sta_info.c | 4 - net/mac80211/sta_info.h | 31 - net/mac80211/wpa.c | 18 +- net/mac802154/llsec.c | 2 +- net/netfilter/Kconfig | 2 +- net/netfilter/ipset/ip_set_hash_gen.h | 24 +- net/netfilter/ipvs/ip_vs_conn.c | 4 - net/netfilter/ipvs/ip_vs_ctl.c | 7 +- net/netfilter/nf_nat_proto_common.c | 7 +- net/netfilter/nf_nat_proto_dccp.c | 5 +- net/netfilter/nf_nat_proto_sctp.c | 5 +- net/netfilter/nf_nat_proto_tcp.c | 5 +- net/netfilter/nf_nat_proto_udp.c | 5 +- net/netfilter/nf_nat_proto_udplite.c | 5 +- net/netfilter/nf_queue.c | 23 +- net/netfilter/nf_synproxy_core.c | 5 - net/netfilter/nfnetlink_cthelper.c | 8 +- net/netfilter/nfnetlink_queue.c | 14 +- net/netfilter/nft_dynset.c | 4 +- net/netfilter/nft_exthdr.c | 3 - net/netfilter/nft_nat.c | 4 +- net/netfilter/x_tables.c | 73 +- net/netfilter/xt_CT.c | 3 - net/netfilter/xt_IDLETIMER.c | 1 - net/netfilter/xt_LED.c | 1 - net/netfilter/xt_RATEEST.c | 4 - net/netfilter/xt_TEE.c | 2 - net/netfilter/xt_bpf.c | 1 - net/netfilter/xt_connlimit.c | 1 - net/netfilter/xt_hashlimit.c | 2 - net/netfilter/xt_limit.c | 1 - net/netfilter/xt_nfacct.c | 1 - net/netfilter/xt_qtaguid.c | 30 +- net/netfilter/xt_quota.c | 1 - net/netfilter/xt_quota2.c | 25 +- net/netfilter/xt_rateest.c | 1 - net/netfilter/xt_recent.c | 12 +- net/netfilter/xt_statistic.c | 1 - net/netfilter/xt_string.c | 1 - net/netlabel/netlabel_cipso_v4.c | 12 +- net/netlabel/netlabel_mgmt.c | 19 +- net/netlink/af_netlink.c | 29 +- net/netrom/nr_timer.c | 20 +- net/nfc/af_nfc.c | 3 - net/nfc/core.c | 32 +- net/nfc/digital_core.c | 9 +- net/nfc/digital_dep.c | 2 - net/nfc/digital_technology.c | 8 +- net/nfc/llcp_sock.c | 21 - net/nfc/nci/core.c | 31 +- net/nfc/nci/hci.c | 5 - net/nfc/nci/rsp.c | 2 - net/nfc/netlink.c | 13 +- net/nfc/rawsock.c | 4 +- net/openvswitch/actions.c | 54 +- net/packet/af_packet.c | 10 +- net/phonet/pep.c | 3 - net/rds/recv.c | 2 +- net/rose/rose_loopback.c | 17 +- net/rxrpc/ar-key.c | 6 +- net/sched/cls_tcindex.c | 8 +- 
net/sched/sch_api.c | 3 +- net/sched/sch_choke.c | 7 +- net/sched/sch_dsmark.c | 3 +- net/sched/sch_fifo.c | 3 - net/sched/sch_generic.c | 1 - net/sched/sch_gred.c | 2 +- net/sched/sch_qfq.c | 6 +- net/sched/sch_red.c | 7 +- net/sched/sch_sfq.c | 2 +- net/sched/sch_teql.c | 3 - net/sctp/bind_addr.c | 20 +- net/sctp/input.c | 11 +- net/sctp/ipv6.c | 7 +- net/sctp/protocol.c | 10 +- net/sctp/sm_make_chunk.c | 44 +- net/sctp/sm_statefuns.c | 7 +- net/sctp/socket.c | 31 +- net/sunrpc/addr.c | 2 +- net/sunrpc/auth_gss/auth_gss.c | 30 +- net/sunrpc/auth_gss/auth_gss_internal.h | 45 - net/sunrpc/auth_gss/gss_krb5_mech.c | 31 +- net/sunrpc/auth_gss/svcauth_gss.c | 13 +- net/sunrpc/sched.c | 12 +- net/sunrpc/svc_xprt.c | 4 +- net/tipc/link.c | 4 - net/tipc/msg.c | 9 +- net/tipc/name_distr.c | 2 +- net/tipc/netlink_compat.c | 2 +- net/tipc/socket.c | 14 +- net/unix/Kconfig | 5 - net/unix/Makefile | 2 - net/unix/af_unix.c | 158 +- net/unix/garbage.c | 82 +- net/unix/scm.c | 163 - net/unix/scm.h | 10 - net/vmw_vsock/af_vsock.c | 11 +- net/vmw_vsock/virtio_transport_common.c | 4 - net/vmw_vsock/vmci_transport.c | 3 +- net/wireguard/Makefile | 5 +- net/wireguard/allowedips.c | 188 +- net/wireguard/allowedips.h | 14 +- net/wireguard/compat/Makefile.include | 1 - net/wireguard/compat/compat-asm.h | 15 +- net/wireguard/compat/compat.h | 116 +- net/wireguard/compat/dst_cache/dst_cache.c | 2 - .../compat/simd/include/linux/simd.h | 3 +- .../compat/siphash/include/linux/siphash.h | 14 +- net/wireguard/compat/siphash/siphash.c | 48 +- net/wireguard/compat/udp_tunnel/udp_tunnel.c | 12 +- net/wireguard/compat/version/linux/version.h | 10 - .../zinc/curve25519/curve25519-x86_64.c | 8 +- .../crypto/zinc/curve25519/curve25519.c | 1 + net/wireguard/device.c | 52 +- net/wireguard/device.h | 24 +- net/wireguard/main.c | 25 +- net/wireguard/peer.c | 53 +- net/wireguard/peer.h | 11 +- net/wireguard/queueing.c | 90 +- net/wireguard/queueing.h | 45 +- net/wireguard/ratelimiter.c | 4 +- 
net/wireguard/receive.c | 55 +- net/wireguard/selftest/allowedips.c | 165 +- net/wireguard/send.c | 31 +- net/wireguard/socket.c | 12 +- net/wireguard/version.h | 2 +- net/wireless/nl80211.c | 2 +- net/wireless/scan.c | 4 +- net/wireless/sme.c | 2 +- net/wireless/util.c | 19 +- net/wireless/wext-core.c | 5 +- net/wireless/wext-spy.c | 14 +- net/x25/af_x25.c | 8 +- net/xfrm/xfrm_input.c | 2 +- net/xfrm/xfrm_user.c | 28 +- samples/kfifo/bytestream-example.c | 8 +- samples/kfifo/inttype-example.c | 8 +- samples/kfifo/record-example.c | 8 +- scripts/Makefile | 9 +- scripts/Makefile.build | 3 - scripts/Makefile.extrawarn | 1 - scripts/checkpatch.pl | 2 +- scripts/depmod.sh | 2 - scripts/kconfig/nconf.c | 2 +- scripts/mkcompile_h | 14 +- scripts/recordmcount.c | 2 +- scripts/recordmcount.pl | 21 +- scripts/tracing/draw_functrace.py | 6 +- security/integrity/ima/ima_fs.c | 2 +- security/integrity/integrity_audit.c | 2 - security/keys/trusted.c | 2 +- security/lsm_audit.c | 7 +- security/security.c | 14 +- security/selinux/avc.c | 13 +- security/selinux/hooks.c | 33 +- security/selinux/include/classmap.h | 2 +- security/selinux/include/security.h | 2 - security/selinux/nlmsgtab.c | 24 - security/selinux/ss/policydb.c | 4 - security/selinux/ss/policydb.h | 2 - security/selinux/ss/services.c | 3 - security/smack/smack_access.c | 17 +- security/smack/smackfs.c | 11 +- sound/core/control_compat.c | 3 - sound/core/init.c | 2 + sound/core/jack.c | 7 - sound/core/oss/mixer_oss.c | 43 +- sound/core/oss/pcm_oss.c | 59 +- sound/core/pcm.c | 6 +- sound/core/pcm_lib.c | 2 +- sound/core/seq/oss/seq_oss_synth.c | 3 +- sound/core/seq/seq_device.c | 8 +- sound/core/seq/seq_ports.c | 39 +- sound/core/seq/seq_queue.c | 14 +- sound/core/seq/seq_queue.h | 8 +- sound/core/timer.c | 20 +- sound/drivers/aloop.c | 11 +- sound/drivers/opl3/opl3_midi.c | 2 +- sound/firewire/Kconfig | 5 +- sound/firewire/bebob/bebob.c | 5 +- sound/firewire/oxfw/oxfw.c | 3 +- sound/hda/hdac_controller.c | 5 +- 
sound/isa/cmi8330.c | 2 +- sound/isa/gus/gus_dma.c | 2 - sound/isa/sb/emu8000.c | 4 +- sound/isa/sb/sb16_csp.c | 20 +- sound/isa/sb/sb8.c | 4 + sound/pci/ctxfi/ctamixer.c | 14 +- sound/pci/ctxfi/ctdaio.c | 16 +- sound/pci/ctxfi/cthw20k2.c | 2 +- sound/pci/ctxfi/ctresource.c | 7 +- sound/pci/ctxfi/ctresource.h | 4 +- sound/pci/ctxfi/ctsrc.c | 7 +- sound/pci/hda/hda_bind.c | 4 - sound/pci/hda/hda_codec.c | 3 - sound/pci/hda/hda_generic.c | 38 +- sound/pci/hda/hda_generic.h | 1 - sound/pci/hda/hda_intel.c | 5 +- sound/pci/hda/hda_tegra.c | 3 - sound/pci/hda/patch_ca0132.c | 16 +- sound/pci/hda/patch_conexant.c | 1 - sound/pci/hda/patch_hdmi.c | 13 - sound/pci/hda/patch_realtek.c | 44 +- sound/pci/hda/patch_via.c | 1 - sound/pci/rme9652/hdsp.c | 3 +- sound/pci/rme9652/hdspm.c | 3 +- sound/pci/rme9652/rme9652.c | 3 +- sound/ppc/powermac.c | 6 +- sound/soc/codecs/cs42l56.c | 3 +- sound/soc/codecs/rt286.c | 23 +- sound/soc/codecs/rt5640.c | 4 +- sound/soc/codecs/rt5651.c | 4 +- sound/soc/codecs/sgtl5000.c | 2 +- sound/soc/codecs/sti-sas.c | 1 - sound/soc/codecs/wm_adsp.c | 5 +- sound/soc/fsl/fsl_esai.c | 8 +- sound/soc/fsl/pcm030-audio-fabric.c | 11 +- sound/soc/intel/atom/sst-mfld-platform-pcm.c | 3 +- sound/soc/intel/boards/haswell.c | 1 - sound/soc/jz4740/jz4740-i2s.c | 4 - sound/soc/msm/msm8998.c | 27 - sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c | 15 +- sound/soc/samsung/idma.c | 2 - sound/soc/soc-core.c | 2 +- sound/soc/soc-dapm.c | 39 +- sound/soc/soc-ops.c | 58 +- sound/soc/soc-pcm.c | 2 - sound/soc/soc-topology.c | 3 - sound/soc/tegra/tegra_alc5632.c | 1 - sound/soc/tegra/tegra_max98090.c | 1 - sound/soc/tegra/tegra_rt5640.c | 1 - sound/soc/tegra/tegra_rt5677.c | 1 - sound/soc/tegra/tegra_wm8753.c | 1 - sound/soc/tegra/tegra_wm8903.c | 1 - sound/soc/tegra/tegra_wm9712.c | 1 - sound/soc/tegra/trimslice.c | 1 - sound/synth/emux/emux.c | 2 +- sound/usb/6fire/comm.c | 2 +- sound/usb/6fire/firmware.c | 6 +- sound/usb/card.c | 14 +- sound/usb/format.c | 4 - 
sound/usb/line6/driver.c | 12 +- sound/usb/line6/driver.h | 2 +- sound/usb/line6/toneport.c | 2 +- sound/usb/midi.c | 8 - sound/usb/misc/ua101.c | 4 +- sound/usb/pcm.c | 38 +- sound/usb/quirks-table.h | 42 - sound/usb/quirks.c | 18 +- sound/usb/stream.c | 6 +- sound/usb/usbaudio.h | 2 - tools/arch/ia64/include/asm/barrier.h | 3 + tools/perf/tests/bpf.c | 2 - tools/perf/tests/sample-parsing.c | 2 +- tools/perf/util/auxtrace.c | 4 + .../util/intel-pt-decoder/intel-pt-decoder.c | 3 - tools/perf/util/map.c | 7 +- tools/perf/util/parse-regs-options.c | 2 +- tools/perf/util/probe-file.c | 4 +- tools/perf/util/session.c | 1 - tools/testing/selftests/lib.mk | 4 - .../powerpc/pmu/ebb/no_handler_test.c | 2 + tools/testing/selftests/zram/zram.sh | 15 +- tools/testing/selftests/zram/zram01.sh | 33 +- tools/testing/selftests/zram/zram02.sh | 1 + tools/testing/selftests/zram/zram_lib.sh | 134 +- tools/usb/testusb.c | 14 +- virt/kvm/kvm_main.c | 3 +- 2225 files changed, 27020 insertions(+), 30061 deletions(-) create mode 100644 Documentation/devicetree/bindings/scheduler/sched_hmp.txt create mode 100644 Documentation/scheduler/sched-hmp.txt create mode 100644 Documentation/vm/z3fold.txt create mode 100644 arch/alpha/kernel/alpha_ksyms.c delete mode 100644 arch/arm/boot/dts/qcom/msm8998-v2.1-yoshino-poplar_kddi.dts delete mode 100644 arch/arm/boot/dts/qcom/msm8998-yoshino-poplar_jp-common.dtsi delete mode 100644 arch/arm/boot/dts/qcom/msm8998-yoshino-poplar_kddi.dtsi delete mode 100644 arch/arm64/configs/diffconfig/poplar_kddi_diffconfig delete mode 100644 arch/arm64/configs/lineage-msm8998-yoshino-poplar_kddi_defconfig delete mode 100644 arch/openrisc/include/asm/barrier.h create mode 100644 drivers/block/zram/zcomp_lz4.c create mode 100644 drivers/block/zram/zcomp_lz4.h create mode 100644 drivers/block/zram/zcomp_lzo.c create mode 100644 drivers/block/zram/zcomp_lzo.h delete mode 100644 drivers/misc/carillon/Kconfig delete mode 100644 drivers/misc/carillon/Makefile delete mode 
100644 drivers/misc/carillon/bd7602/bd7602.c delete mode 100644 drivers/misc/carillon/bd7602/bd7602.h delete mode 100644 drivers/misc/carillon/cxd224x/cxd224x-i2c.c delete mode 100644 drivers/misc/carillon/cxd224x/cxd224x.h delete mode 100644 drivers/misc/carillon/main_module.c delete mode 100644 include/uapi/linux/qbg-profile.h delete mode 100644 include/uapi/linux/qbg.h delete mode 100644 include/uapi/linux/slatecom_interface.h create mode 100644 kernel/elfcore.c create mode 100644 kernel/futex_compat.c create mode 100644 kernel/sched/boost.c create mode 100644 kernel/sched/core_ctl.c create mode 100644 kernel/sched/hmp.c create mode 100644 kernel/sched/sched_avg.c delete mode 100644 localversion-st create mode 100644 mm/z3fold.c delete mode 100644 net/sunrpc/auth_gss/auth_gss_internal.h delete mode 100644 net/unix/scm.c delete mode 100644 net/unix/scm.h delete mode 100644 net/wireguard/compat/version/linux/version.h diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram index c1513c756af1..2e69e83bf510 100644 --- a/Documentation/ABI/testing/sysfs-block-zram +++ b/Documentation/ABI/testing/sysfs-block-zram @@ -22,6 +22,41 @@ Description: device. The reset operation frees all the memory associated with this device. +What: /sys/block/zram/num_reads +Date: August 2010 +Contact: Nitin Gupta +Description: + The num_reads file is read-only and specifies the number of + reads (failed or successful) done on this device. + +What: /sys/block/zram/num_writes +Date: August 2010 +Contact: Nitin Gupta +Description: + The num_writes file is read-only and specifies the number of + writes (failed or successful) done on this device. + +What: /sys/block/zram/invalid_io +Date: August 2010 +Contact: Nitin Gupta +Description: + The invalid_io file is read-only and specifies the number of + non-page-size-aligned I/O requests issued to this device. 
+ +What: /sys/block/zram/failed_reads +Date: February 2014 +Contact: Sergey Senozhatsky +Description: + The failed_reads file is read-only and specifies the number of + failed reads happened on this device. + +What: /sys/block/zram/failed_writes +Date: February 2014 +Contact: Sergey Senozhatsky +Description: + The failed_writes file is read-only and specifies the number of + failed writes happened on this device. + What: /sys/block/zram/max_comp_streams Date: February 2014 Contact: Sergey Senozhatsky @@ -38,24 +73,74 @@ Description: available and selected compression algorithms, change compression algorithm selection. +What: /sys/block/zram/notify_free +Date: August 2010 +Contact: Nitin Gupta +Description: + The notify_free file is read-only. Depending on device usage + scenario it may account a) the number of pages freed because + of swap slot free notifications or b) the number of pages freed + because of REQ_DISCARD requests sent by bio. The former ones + are sent to a swap block device when a swap slot is freed, which + implies that this disk is being used as a swap disk. The latter + ones are sent by filesystem mounted with discard option, + whenever some data blocks are getting discarded. + +What: /sys/block/zram/zero_pages +Date: August 2010 +Contact: Nitin Gupta +Description: + The zero_pages file is read-only and specifies number of zero + filled pages written to this disk. No memory is allocated for + such pages. + +What: /sys/block/zram/orig_data_size +Date: August 2010 +Contact: Nitin Gupta +Description: + The orig_data_size file is read-only and specifies uncompressed + size of data stored in this disk. This excludes zero-filled + pages (zero_pages) since no memory is allocated for them. + Unit: bytes + +What: /sys/block/zram/compr_data_size +Date: August 2010 +Contact: Nitin Gupta +Description: + The compr_data_size file is read-only and specifies compressed + size of data stored in this disk. 
So, compression ratio can be + calculated using orig_data_size and this statistic. + Unit: bytes + +What: /sys/block/zram/mem_used_total +Date: August 2010 +Contact: Nitin Gupta +Description: + The mem_used_total file is read-only and specifies the amount + of memory, including allocator fragmentation and metadata + overhead, allocated for this disk. So, allocator space + efficiency can be calculated using compr_data_size and this + statistic. + Unit: bytes + What: /sys/block/zram/mem_used_max Date: August 2014 Contact: Minchan Kim Description: - The mem_used_max file is write-only and is used to reset - the counter of maximum memory zram have consumed to store - compressed data. For resetting the value, you should write - "0". Otherwise, you could see -EINVAL. + The mem_used_max file is read/write and specifies the amount + of maximum memory zram have consumed to store compressed data. + For resetting the value, you should write "0". Otherwise, + you could see -EINVAL. Unit: bytes What: /sys/block/zram/mem_limit Date: August 2014 Contact: Minchan Kim Description: - The mem_limit file is write-only and specifies the maximum - amount of memory ZRAM can use to store the compressed data. - The limit could be changed in run time and "0" means disable - the limit. No limit is the initial state. Unit: bytes + The mem_limit file is read/write and specifies the maximum + amount of memory ZRAM can use to store the compressed data. The + limit could be changed in run time and "0" means disable the + limit. No limit is the initial state. Unit: bytes What: /sys/block/zram/compact Date: August 2015 @@ -81,20 +166,3 @@ Description: The mm_stat file is read-only and represents device's mm statistics (orig_data_size, compr_data_size, etc.) in a format similar to block layer statistics file format. 
- -What: /sys/block/zram/debug_stat -Date: July 2016 -Contact: Sergey Senozhatsky -Description: - The debug_stat file is read-only and represents various - device's debugging info useful for kernel developers. Its - format is not documented intentionally and may change - anytime without any notice. - -What: /sys/block/zram/backing_dev -Date: June 2017 -Contact: Minchan Kim -Description: - The backing_dev file is read-write and set up backing - device for zram to write incompressible pages. - For using, user should enable CONFIG_ZRAM_WRITEBACK. diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt index 875b2b56b87f..d88f0c70cd7f 100644 --- a/Documentation/blockdev/zram.txt +++ b/Documentation/blockdev/zram.txt @@ -59,23 +59,23 @@ num_devices parameter is optional and tells zram how many devices should be pre-created. Default: 1. 2) Set max number of compression streams -Regardless the value passed to this attribute, ZRAM will always -allocate multiple compression streams - one per online CPUs - thus -allowing several concurrent compression operations. The number of -allocated compression streams goes down when some of the CPUs -become offline. There is no single-compression-stream mode anymore, -unless you are running a UP system or has only 1 CPU online. - -To find out how many streams are currently available: + Regardless the value passed to this attribute, ZRAM will always + allocate multiple compression streams - one per online CPUs - thus + allowing several concurrent compression operations. The number of + allocated compression streams goes down when some of the CPUs + become offline. There is no single-compression-stream mode anymore, + unless you are running a UP system or has only 1 CPU online. 
+ + To find out how many streams are currently available: cat /sys/block/zram0/max_comp_streams 3) Select compression algorithm -Using comp_algorithm device attribute one can see available and -currently selected (shown in square brackets) compression algorithms, -change selected compression algorithm (once the device is initialised -there is no way to change compression algorithm). + Using comp_algorithm device attribute one can see available and + currently selected (shown in square brackets) compression algorithms, + change selected compression algorithm (once the device is initialised + there is no way to change compression algorithm). -Examples: + Examples: #show supported compression algorithms cat /sys/block/zram0/comp_algorithm lzo [lz4] @@ -83,27 +83,17 @@ Examples: #select lzo compression algorithm echo lzo > /sys/block/zram0/comp_algorithm -For the time being, the `comp_algorithm' content does not necessarily -show every compression algorithm supported by the kernel. We keep this -list primarily to simplify device configuration and one can configure -a new device with a compression algorithm that is not listed in -`comp_algorithm'. The thing is that, internally, ZRAM uses Crypto API -and, if some of the algorithms were built as modules, it's impossible -to list all of them using, for instance, /proc/crypto or any other -method. This, however, has an advantage of permitting the usage of -custom crypto compression modules (implementing S/W or H/W compression). - 4) Set Disksize -Set disk size by writing the value to sysfs node 'disksize'. -The value can be either in bytes or you can use mem suffixes. -Examples: - # Initialize /dev/zram0 with 50MB disksize - echo $((50*1024*1024)) > /sys/block/zram0/disksize + Set disk size by writing the value to sysfs node 'disksize'. + The value can be either in bytes or you can use mem suffixes. 
+ Examples: + # Initialize /dev/zram0 with 50MB disksize + echo $((50*1024*1024)) > /sys/block/zram0/disksize - # Using mem suffixes - echo 256K > /sys/block/zram0/disksize - echo 512M > /sys/block/zram0/disksize - echo 1G > /sys/block/zram0/disksize + # Using mem suffixes + echo 256K > /sys/block/zram0/disksize + echo 512M > /sys/block/zram0/disksize + echo 1G > /sys/block/zram0/disksize Note: There is little point creating a zram of greater than twice the size of memory @@ -111,20 +101,20 @@ since we expect a 2:1 compression ratio. Note that zram uses about 0.1% of the size of the disk when not in use so a huge zram is wasteful. 5) Set memory limit: Optional -Set memory limit by writing the value to sysfs node 'mem_limit'. -The value can be either in bytes or you can use mem suffixes. -In addition, you could change the value in runtime. -Examples: - # limit /dev/zram0 with 50MB memory - echo $((50*1024*1024)) > /sys/block/zram0/mem_limit + Set memory limit by writing the value to sysfs node 'mem_limit'. + The value can be either in bytes or you can use mem suffixes. + In addition, you could change the value in runtime. 
+ Examples: + # limit /dev/zram0 with 50MB memory + echo $((50*1024*1024)) > /sys/block/zram0/mem_limit - # Using mem suffixes - echo 256K > /sys/block/zram0/mem_limit - echo 512M > /sys/block/zram0/mem_limit - echo 1G > /sys/block/zram0/mem_limit + # Using mem suffixes + echo 256K > /sys/block/zram0/mem_limit + echo 512M > /sys/block/zram0/mem_limit + echo 1G > /sys/block/zram0/mem_limit - # To disable memory limit - echo 0 > /sys/block/zram0/mem_limit + # To disable memory limit + echo 0 > /sys/block/zram0/mem_limit 6) Activate: mkswap /dev/zram0 @@ -161,15 +151,41 @@ Name access description disksize RW show and set the device's disk size initstate RO shows the initialization state of the device reset WO trigger device reset -mem_used_max WO reset the `mem_used_max' counter (see later) -mem_limit WO specifies the maximum amount of memory ZRAM can use - to store the compressed data +num_reads RO the number of reads +failed_reads RO the number of failed reads +num_write RO the number of writes +failed_writes RO the number of failed writes +invalid_io RO the number of non-page-size-aligned I/O requests max_comp_streams RW the number of possible concurrent compress operations comp_algorithm RW show and change the compression algorithm +notify_free RO the number of notifications to free pages (either + slot free notifications or REQ_DISCARD requests) +zero_pages RO the number of zero filled pages written to this disk +orig_data_size RO uncompressed size of data stored in this disk +compr_data_size RO compressed size of data stored in this disk +mem_used_total RO the amount of memory allocated for this disk +mem_used_max RW the maximum amount of memory zram have consumed to + store the data (to reset this counter to the actual + current value, write 1 to this attribute) +mem_limit RW the maximum amount of memory ZRAM can use to store + the compressed data +pages_compacted RO the number of pages freed during compaction + (available only via zram/mm_stat node) compact WO 
trigger memory compaction -debug_stat RO this file is used for zram debugging purposes -backing_dev RW set up backend storage for zram to write out +WARNING +======= +per-stat sysfs attributes are considered to be deprecated. +The basic strategy is: +-- the existing RW nodes will be downgraded to WO nodes (in linux 4.11) +-- deprecated RO sysfs nodes will eventually be removed (in linux 4.11) + +The list of deprecated attributes can be found here: +Documentation/ABI/obsolete/sysfs-block-zram + +Basically, every attribute that has its own read accessible sysfs node +(e.g. num_reads) *AND* is accessible via one of the stat files (zram/stat +or zram/io_stat or zram/mm_stat) is considered to be deprecated. User space is advised to use the following files to read the device statistics. @@ -184,41 +200,22 @@ The stat file represents device's I/O statistics not accounted by block layer and, thus, not available in zram/stat file. It consists of a single line of text and contains the following stats separated by whitespace: - failed_reads the number of failed reads - failed_writes the number of failed writes - invalid_io the number of non-page-size-aligned I/O requests - notify_free Depending on device usage scenario it may account - a) the number of pages freed because of swap slot free - notifications or b) the number of pages freed because of - REQ_DISCARD requests sent by bio. The former ones are - sent to a swap block device when a swap slot is freed, - which implies that this disk is being used as a swap disk. - The latter ones are sent by filesystem mounted with - discard option, whenever some data blocks are getting - discarded. + failed_reads + failed_writes + invalid_io + notify_free File /sys/block/zram/mm_stat The stat file represents device's mm statistics. It consists of a single line of text and contains the following stats separated by whitespace: - orig_data_size uncompressed size of data stored in this disk. 
- This excludes same-element-filled pages (same_pages) since - no memory is allocated for them. - Unit: bytes - compr_data_size compressed size of data stored in this disk - mem_used_total the amount of memory allocated for this disk. This - includes allocator fragmentation and metadata overhead, - allocated for this disk. So, allocator space efficiency - can be calculated using compr_data_size and this statistic. - Unit: bytes - mem_limit the maximum amount of memory ZRAM can use to store - the compressed data - mem_used_max the maximum amount of memory zram have consumed to - store the data - same_pages the number of same element filled pages written to this disk. - No memory is allocated for such pages. - pages_compacted the number of pages freed during compaction - huge_pages the number of incompressible pages + orig_data_size + compr_data_size + mem_used_total + mem_limit + mem_used_max + zero_pages + num_migrated 9) Deactivate: swapoff /dev/zram0 @@ -233,39 +230,5 @@ line of text and contains the following stats separated by whitespace: resets the disksize to zero. You must set the disksize again before reusing the device. -* Optional Feature - -= writeback - -With incompressible pages, there is no memory saving with zram. -Instead, with CONFIG_ZRAM_WRITEBACK, zram can write incompressible page -to backing storage rather than keeping it in memory. -User should set up backing device via /sys/block/zramX/backing_dev -before disksize setting. - -= memory tracking - -With CONFIG_ZRAM_MEMORY_TRACKING, user can know information of the -zram block. It could be useful to catch cold or incompressible -pages of the process with*pagemap. -If you enable the feature, you could see block state via -/sys/kernel/debug/zram/zram0/block_state". The output is as follows, - - 300 75.033841 .wh - 301 63.806904 s.. - 302 63.806919 ..h - -First column is zram's block index. -Second column is access time since the system was booted -Third column is state of the block. 
-(s: same page -w: written page to backing store -h: huge page) - -First line of above example says 300th block is accessed at 75.033841sec -and the block's state is huge so it is written back to the backing -storage. It's a debugging feature so anyone shouldn't rely on it to work -properly. - Nitin Gupta ngupta@vflare.org diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt index 3a58fdf0c566..fb733c4e1c11 100644 --- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt +++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt @@ -112,7 +112,7 @@ on various other factors also like; so the device should have enough free bytes available its OOB/Spare area to accommodate ECC for entire page. In general following expression helps in determining if given device can accommodate ECC syndrome: - "2 + (PAGESIZE / 512) * ECC_BYTES" <= OOBSIZE" + "2 + (PAGESIZE / 512) * ECC_BYTES" >= OOBSIZE" where OOBSIZE number of bytes in OOB/spare area PAGESIZE number of bytes in main-area of device page diff --git a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt index 0188bbd2e35f..5b6cd9b3f628 100644 --- a/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt +++ b/Documentation/devicetree/bindings/net/nfc/nxp-nci.txt @@ -27,7 +27,7 @@ Example (for ARM-based BeagleBone with NPC100 NFC controller on I2C2): clock-frequency = <100000>; interrupt-parent = <&gpio1>; - interrupts = <29 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <29 GPIO_ACTIVE_HIGH>; enable-gpios = <&gpio0 30 GPIO_ACTIVE_HIGH>; firmware-gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>; diff --git a/Documentation/devicetree/bindings/net/nfc/pn544.txt b/Documentation/devicetree/bindings/net/nfc/pn544.txt index 8541e8dafd55..dab69f36167c 100644 --- a/Documentation/devicetree/bindings/net/nfc/pn544.txt +++ b/Documentation/devicetree/bindings/net/nfc/pn544.txt @@ -27,7 +27,7 @@ Example (for ARM-based BeagleBone with 
PN544 on I2C2): clock-frequency = <400000>; interrupt-parent = <&gpio1>; - interrupts = <17 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <17 GPIO_ACTIVE_HIGH>; enable-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>; firmware-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>; diff --git a/Documentation/devicetree/bindings/scheduler/sched_hmp.txt b/Documentation/devicetree/bindings/scheduler/sched_hmp.txt new file mode 100644 index 000000000000..ba1d4db9e407 --- /dev/null +++ b/Documentation/devicetree/bindings/scheduler/sched_hmp.txt @@ -0,0 +1,35 @@ +* HMP scheduler + +This file describes the bindings for an optional HMP scheduler +node (/sched-hmp). + +Required properties: + +Optional properties: + +- boost-policy: The HMP scheduler has two types of task placement boost +policies. + +(1) boost-on-big policy make use of all big CPUs up to their full capacity +before using the little CPUs. This improves performance on true b.L systems +where the big CPUs have higher efficiency compared to the little CPUs. + +(2) boost-on-all policy place the tasks on the CPU having the highest +spare capacity. This policy is optimal for SMP like systems. + +The scheduler sets the boost policy to boost-on-big on systems which has +CPUs of different efficiencies. However it is possible that CPUs of the +same micro architecture to have slight difference in efficiency due to +other factors like cache size. Selecting the boost-on-big policy based +on relative difference in efficiency is not optimal on such systems. +The boost-policy device tree property is introduced to specify the +required boost type and it overrides the default selection of boost +type in the scheduler. + +The possible values for this property are "boost-on-big" and "boost-on-all". 
+ +Example: + +sched-hmp { + boost-policy = "boost-on-all" +} diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt index 1218a5e2975c..24da7b32c489 100644 --- a/Documentation/filesystems/sysfs.txt +++ b/Documentation/filesystems/sysfs.txt @@ -211,10 +211,12 @@ Other notes: is 4096. - show() methods should return the number of bytes printed into the - buffer. + buffer. This is the return value of scnprintf(). -- show() should only use sysfs_emit() or sysfs_emit_at() when formatting - the value to be returned to user space. +- show() must not use snprintf() when formatting the value to be + returned to user space. If you can guarantee that an overflow + will never happen you can use sprintf() otherwise you must use + scnprintf(). - store() should return the number of bytes used from the buffer. If the entire buffer has been used, just return the count argument. diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index 5a6e70483ced..334b49ef02d1 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt @@ -191,12 +191,11 @@ ad_actor_sys_prio ad_actor_system In an AD system, this specifies the mac-address for the actor in - protocol packet exchanges (LACPDUs). The value cannot be a multicast - address. If the all-zeroes MAC is specified, bonding will internally - use the MAC of the bond itself. It is preferred to have the - local-admin bit set for this mac but driver does not enforce it. If - the value is not given then system defaults to using the masters' - mac address as actors' system address. + protocol packet exchanges (LACPDUs). The value cannot be NULL or + multicast. It is preferred to have the local-admin bit set for this + mac but driver does not enforce it. If the value is not given then + system defaults to using the masters' mac address as actors' system + address. 
This parameter has effect only in 802.3ad mode and is available through SysFs interface. diff --git a/Documentation/scheduler/sched-hmp.txt b/Documentation/scheduler/sched-hmp.txt new file mode 100644 index 000000000000..32906610b25f --- /dev/null +++ b/Documentation/scheduler/sched-hmp.txt @@ -0,0 +1,1673 @@ +CONTENTS + +1. Introduction + 1.1 Heterogeneous Systems + 1.2 CPU Frequency Guidance +2. Window-Based Load Tracking Scheme + 2.1 Synchronized Windows + 2.2 struct ravg + 2.3 Scaling Load Statistics + 2.4 sched_window_stats_policy + 2.5 Task Events + 2.6 update_task_ravg() + 2.7 update_history() + 2.8 Per-task 'initial task load' +3. CPU Capacity + 3.1 Load scale factor + 3.2 CPU Power +4. CPU Power +5. HMP Scheduler + 5.1 Classification of Tasks and CPUs + 5.2 select_best_cpu() + 5.2.1 sched_boost + 5.2.2 task_will_fit() + 5.2.3 Tunables affecting select_best_cpu() + 5.2.4 Wakeup Logic + 5.3 Scheduler Tick + 5.4 Load Balancer + 5.5 Real Time Tasks + 5.6 Task packing +6. Frequency Guidance + 6.1 Per-CPU Window-Based Stats + 6.2 Per-task Window-Based Stats + 6.3 Effect of various task events + 6.4 Tying it all together +7. Tunables +8. HMP Scheduler Trace Points + 8.1 sched_enq_deq_task + 8.2 sched_task_load + 8.3 sched_cpu_load_* + 8.4 sched_update_task_ravg + 8.5 sched_update_history + 8.6 sched_reset_all_windows_stats + 8.7 sched_migration_update_sum + 8.8 sched_get_busy + 8.9 sched_freq_alert + 8.10 sched_set_boost +9. Device Tree bindings + +=============== +1. INTRODUCTION +=============== + +Scheduler extensions described in this document serves two goals: + +1) handle heterogeneous multi-processor (HMP) systems +2) guide cpufreq governor on proactive changes to cpu frequency + +*** 1.1 Heterogeneous systems + +Heterogeneous systems have cpus that differ with regard to their performance and +power characteristics. Some cpus could offer peak performance better than +others, although at cost of consuming more power. 
We shall refer such cpus as +"high performance" or "performance efficient" cpus. Other cpus that offer lesser +peak performance are referred to as "power efficient". + +In this situation the scheduler is tasked with the responsibility of assigning +tasks to run on the right cpus where their performance requirements can be met +at the least expense of power. + +Achieving that goal is made complicated by the fact that the scheduler has +little clue about performance requirements of tasks and how they may change by +running on power or performance efficient cpus! One simplifying assumption here +could be that a task's desire for more performance is expressed by its cpu +utilization. A task demanding high cpu utilization on a power-efficient cpu +would likely improve in its performance by running on a performance-efficient +cpu. This idea forms the basis for HMP-related scheduler extensions. + +Key inputs required by the HMP scheduler for its task placement decisions are: + +a) task load - this reflects cpu utilization or demand of tasks +b) CPU capacity - this reflects peak performance offered by cpus +c) CPU power - this reflects power or energy cost of cpus + +Once all 3 pieces of information are available, the HMP scheduler can place +tasks on the lowest power cpus where their demand can be satisfied. + +*** 1.2 CPU Frequency guidance + +A somewhat separate but related goal of the scheduler extensions described here +is to provide guidance to the cpufreq governor on the need to change cpu +frequency. Most governors that control cpu frequency work on a reactive basis. +CPU utilization is sampled at regular intervals, based on which the need to +change frequency is determined. Higher utilization leads to a frequency increase +and vice-versa. There are several problems with this approach that scheduler +can help resolve. + +a) latency + + Reactive nature introduces latency for cpus to ramp up to desired speed + which can hurt application performance. 
This is inevitable as cpufreq + governors can only track cpu utilization as a whole and not tasks which + are driving that demand. Scheduler can however keep track of individual + task demand and can alert the governor on changing task activity. For + example, request raise in frequency when tasks activity is increasing on + a cpu because of wakeup or migration or request frequency to be lowered + when task activity is decreasing because of sleep/exit or migration. + +b) part-picture + + Most governors track utilization of each CPU independently. When a task + migrates from one cpu to another the task's execution time is split + across the two cpus. The governor can fail to see the full picture of + task demand in this case and thus the need for increasing frequency, + affecting the task's performance. Scheduler can keep track of task + migrations, fix up busy time upon migration and report per-cpu busy time + to the governor that reflects task demand accurately. + +The rest of this document explains key enhancements made to the scheduler to +accomplish both of the aforementioned goals. + +==================================== +2. WINDOW-BASED LOAD TRACKING SCHEME +==================================== + +As mentioned in the introduction section, knowledge of the CPU demand exerted by +a task is a prerequisite to knowing where to best place the task in an HMP +system. The per-entity load tracking (PELT) scheme, present in Linux kernel +since v3.7, has some perceived shortcomings when used to place tasks on HMP +systems or provide recommendations on CPU frequency. + +Per-entity load tracking does not make a distinction between the ramp up +vs ramp down time of task load. It also decays task load without exception when +a task sleeps. As an example, a cpu bound task at its peak load (LOAD_AVG_MAX or +47742) can see its load decay to 0 after a sleep of just 213ms! 
A cpu-bound task +running on a performance-efficient cpu could thus get re-classified as not +requiring such a cpu after a short sleep. In the case of mobile workloads, tasks +could go to sleep due to a lack of user input. When they wakeup it is very +likely their cpu utilization pattern repeats. Resetting their load across sleep +and incurring latency to reclassify them as requiring a high performance cpu can +hurt application performance. + +The window-based load tracking scheme described in this document avoids these +drawbacks. It keeps track of N windows of execution for every task. Windows +where a task had no activity are ignored and not recorded. N can be tuned at +compile time (RAVG_HIST_SIZE defined in include/linux/sched.h) or at runtime +(/proc/sys/kernel/sched_ravg_hist_size). The window size, W, is common for all +tasks and currently defaults to 10ms ('sched_ravg_window' defined in +kernel/sched/core.c). The window size can be tuned at boot time via the +sched_ravg_window=W argument to kernel. Alternately it can be tuned after boot +via tunables provided by the interactive governor. More on this later. + +Based on the N samples available per-task, a per-task "demand" attribute is +calculated which represents the cpu demand of that task. The demand attribute is +used to classify tasks as to whether or not they need a performance-efficient +CPU and also serves to provide inputs on frequency to the cpufreq governor. More +on this later. The 'sched_window_stats_policy' tunable (defined in +kernel/sched/core.c) controls how the demand field for a task is derived from +its N past samples. + +*** 2.1 Synchronized windows + +Windows of observation for task activity are synchronized across cpus. This +greatly aids in the scheduler's frequency guidance feature. Scheduler currently +relies on a synchronized clock (sched_clock()) for this feature to work. It may +be possible to extend this feature to work on systems having an unsynchronized +sched_clock(). 
+ +struct rq { + + .. + + u64 window_start; + + .. +}; + +The 'window_start' attribute represents the time when current window began on a +cpu. It is updated when key task events such as wakeup or context-switch call +update_task_ravg() to record task activity. The window_start value is expected +to be the same for all cpus, although it could be behind on some cpus where it +has not yet been updated because update_task_ravg() has not been recently +called. For example, when a cpu is idle for a long time its window_start could +be stale. The window_start value for such cpus is rolled forward upon +occurrence of a task event resulting in a call to update_task_ravg(). + +*** 2.2 struct ravg + +The ravg struct contains information tracked per-task. + +struct ravg { + u64 mark_start; + u32 sum, demand; + u32 sum_history[RAVG_HIST_SIZE]; +}; + +struct task_struct { + + .. + + struct ravg ravg; + + .. +}; + +sum_history[] - stores cpu utilization samples from N previous windows + where task had activity + +sum - stores cpu utilization of the task in its most recently + tracked window. Once the corresponding window terminates, + 'sum' will be pushed into the sum_history[] array and is then + reset to 0. It is possible that the window corresponding to + sum is not the current window being tracked on a cpu. For + example, a task could go to sleep in window X and wakeup in + window Y (Y > X). In this case, sum would correspond to the + task's activity seen in window X. When update_task_ravg() is + called during the task's wakeup event it will be seen that + window X has elapsed. The sum value will be pushed to + 'sum_history[]' array before being reset to 0. + +demand - represents task's cpu demand and is derived from the + elements in sum_history[]. The section on + 'sched_window_stats_policy' provides more details on how + 'demand' is derived from elements in sum_history[] array + +mark_start - records timestamp of the beginning of the most recent task + event. 
See section on 'Task events' for possible events that + update 'mark_start' + +curr_window - this is described in the section on 'Frequency guidance' + +prev_window - this is described in the section on 'Frequency guidance' + + +*** 2.3 Scaling load statistics + +Time required for a task to complete its work (and hence its load) depends on, +among various other factors, cpu frequency and its efficiency. In a HMP system, +some cpus are more performance efficient than others. Performance efficiency of +a cpu can be described by its "instructions-per-cycle" (IPC) attribute. History +of task execution could involve task having run at different frequencies and on +cpus with different IPC attributes. To avoid ambiguity of how task load relates +to the frequency and IPC of cpus on which a task has run, task load is captured +in a scaled form, with scaling being done in reference to an "ideal" cpu that +has best possible IPC and frequency. Such an "ideal" cpu, having the best +possible frequency and IPC, may or may not exist in system. + +As an example, consider a HMP system, with two types of cpus, A53 and A57. A53 +has IPC count of 1024 and can run at maximum frequency of 1 GHz, while A57 has +IPC count of 2048 and can run at maximum frequency of 2 GHz. Ideal cpu in this +case is A57 running at 2 GHz. + +A unit of work that takes 100ms to finish on A53 running at 100MHz would get +done in 10ms on A53 running at 1GHz, in 5 ms running on A57 at 1 GHz and 2.5ms +on A57 running at 2 GHz. Thus a load of 100ms can be expressed as 2.5ms in +reference to ideal cpu of A57 running at 2 GHz. + +In order to understand how much load a task will consume on a given cpu, its +scaled load needs to be multiplied by a factor (load scale factor). In above +example, scaled load of 2.5ms needs to be multiplied by a factor of 4 in order +to estimate the load of task on A53 running at 1 GHz. + +/proc/sched_debug provides IPC attribute and load scale factor for every cpu. 
+ +In summary, task load information stored in a task's sum_history[] array is +scaled for both frequency and efficiency. If a task runs for X ms, then the +value stored in its 'sum' field is derived as: + + X_s = X * (f_cur / max_possible_freq) * + (efficiency / max_possible_efficiency) + +where: + +X = cpu utilization that needs to be accounted +X_s = Scaled derivative of X +f_cur = current frequency of the cpu where the task was + running +max_possible_freq = maximum possible frequency (across all cpus) +efficiency = instructions per cycle (IPC) of cpu where task was + running +max_possible_efficiency = maximum IPC offered by any cpu in system + + +*** 2.4 sched_window_stats_policy + +sched_window_stats_policy controls how the 'demand' attribute for a task is +derived from elements in its 'sum_history[]' array. + +WINDOW_STATS_RECENT (0) + demand = recent + +WINDOW_STATS_MAX (1) + demand = max + +WINDOW_STATS_MAX_RECENT_AVG (2) + demand = maximum(average, recent) + +WINDOW_STATS_AVG (3) + demand = average + +where: + M = history size specified by + /proc/sys/kernel/sched_ravg_hist_size + average = average of first M samples found in the sum_history[] array + max = maximum value of first M samples found in the sum_history[] + array + recent = most recent sample (sum_history[0]) + demand = demand attribute found in 'struct ravg' + +This policy can be changed at runtime via +/proc/sys/kernel/sched_window_stats_policy. For example, the command +below would select WINDOW_STATS_USE_MAX policy + +echo 1 > /proc/sys/kernel/sched_window_stats_policy + +*** 2.5 Task events + +A number of events results in the window-based stats of a task being +updated. 
These are: + +PICK_NEXT_TASK - the task is about to start running on a cpu +PUT_PREV_TASK - the task stopped running on a cpu +TASK_WAKE - the task is waking from sleep +TASK_MIGRATE - the task is migrating from one cpu to another +TASK_UPDATE - this event is invoked on a currently running task to + update the task's window-stats and also the cpu's + window-stats such as 'window_start' +IRQ_UPDATE - event to record the busy time spent by an idle cpu + processing interrupts + +*** 2.6 update_task_ravg() + +update_task_ravg() is called to mark the beginning of an event for a task or a +cpu. It serves to accomplish these functions: + +a. Update a cpu's window_start value +b. Update a task's window-stats (sum, sum_history[], demand and mark_start) + +In addition update_task_ravg() updates the busy time information for the given +cpu, which is used for frequency guidance. This is described further in section +6. + +*** 2.7 update_history() + +update_history() is called on a task to record its activity in an elapsed +window. 'sum', which represents task's cpu demand in its elapsed window is +pushed onto sum_history[] array and its 'demand' attribute is updated based on +the sched_window_stats_policy in effect. + +*** 2.8 Initial task load attribute for a task (init_load_pct) + +In some cases, it may be desirable for children of a task to be assigned a +"high" load so that they can start running on best capacity cluster. By default, +newly created tasks are assigned a load defined by tunable sched_init_task_load +(Sec 7.8). Some specialized tasks may need a higher value than the global +default for their child tasks. This will let child tasks run on cpus with best +capacity. This is accomplished by setting the 'initial task load' attribute +(init_load_pct) for a task. Child tasks starting load (ravg.demand and +ravg.sum_history[]) is initialized from their parent's 'initial task load' +attribute. 
Note that child task's 'initial task load' attribute itself will be 0 +by default (i.e it is not inherited from parent). + +A task's 'initial task load' attribute can be set in two ways: + +**** /proc interface + +/proc/[pid]/sched_init_task_load can be written to for setting a task's 'initial +task load' attribute. A numeric value between 0 - 100 (in percent scale) is +accepted for task's 'initial task load' attribute. + +Reading /proc/[pid]/sched_init_task_load returns the 'initial task load' +attribute for the given task. + +**** kernel API + +Following kernel APIs are provided to set or retrieve a given task's 'initial +task load' attribute: + +int sched_set_init_task_load(struct task_struct *p, int init_load_pct); +int sched_get_init_task_load(struct task_struct *p); + + +=============== +3. CPU CAPACITY +=============== + +CPU capacity reflects peak performance offered by a cpu. It is defined both by +maximum frequency at which cpu can run and its efficiency attribute. Capacity of +a cpu is defined in reference to "least" performing cpu such that "least" +performing cpu has capacity of 1024. + + capacity = 1024 * (fmax_cur / min_max_freq) * + (efficiency / min_possible_efficiency) + +where: + + fmax_cur = maximum frequency at which cpu is currently + allowed to run at + efficiency = IPC of cpu + min_max_freq = max frequency at which "least" performing cpu + can run + min_possible_efficiency = IPC of "least" performing cpu + +'fmax_cur' reflects the fact that a cpu may be constrained at runtime to run at +a maximum frequency less than what is supported. This may be a constraint placed +by user or drivers such as thermal that intends to reduce temperature of a cpu +by restricting its maximum frequency. + +'max_possible_capacity' reflects the maximum capacity of a cpu based on the +maximum frequency it supports. 
+ +max_possible_capacity = 1024 * (fmax / min_max_freq) * + (efficiency / min_possible_efficiency) + +where: + fmax = maximum frequency supported by a cpu + +/proc/sched_debug lists capacity and maximum_capacity information for a cpu. + +In the example HMP system quoted in Sec 2.3, "least" performing CPU is A53 and +thus min_max_freq = 1GHz and min_possible_efficiency = 1024. + +Capacity of A57 = 1024 * (2GHz / 1GHz) * (2048 / 1024) = 4096 +Capacity of A53 = 1024 * (1GHz / 1GHz) * (1024 / 1024) = 1024 + +Capacity of A57 when constrained to run at maximum frequency of 500MHz can be +calculated as: + +Capacity of A57 = 1024 * (500MHz / 1GHz) * (2048 / 1024) = 1024 + +*** 3.1 load_scale_factor + +'lsf' or load scale factor attribute of a cpu is used to estimate load of a task +on that cpu when running at its fmax_cur frequency. 'lsf' is defined in +reference to "best" performing cpu such that it's lsf is 1024. 'lsf' for a cpu +is defined as: + + lsf = 1024 * (max_possible_freq / fmax_cur) * + (max_possible_efficiency / ipc) + +where: + fmax_cur = maximum frequency at which cpu is currently + allowed to run at + ipc = IPC of cpu + max_possible_freq = max frequency at which "best" performing cpu + can run + max_possible_efficiency = IPC of "best" performing cpu + +In the example HMP system quoted in Sec 2.3, "best" performing CPU is A57 and +thus max_possible_freq = 2 GHz, max_possible_efficiency = 2048 + +lsf of A57 = 1024 * (2GHz / 2GHz) * (2048 / 2048) = 1024 +lsf of A53 = 1024 * (2GHz / 1 GHz) * (2048 / 1024) = 4096 + +lsf of A57 constrained to run at maximum frequency of 500MHz can be calculated +as: + +lsf of A57 = 1024 * (2GHz / 500Mhz) * (2048 / 2048) = 4096 + +To estimate load of a task on a given cpu running at its fmax_cur: + + load = scaled_load * lsf / 1024 + +A task with scaled load of 20% would thus be estimated to consume 80% bandwidth +of A53 running at 1GHz. 
The same task with scaled load of 20% would be estimated +to consume 160% bandwidth on A53 constrained to run at maximum frequency of +500MHz. + +load_scale_factor, thus, is very useful to estimate load of a task on a given +cpu and thus to decide whether it can fit in a cpu or not. + +*** 3.2 cpu_power + +A metric 'cpu_power' related to 'capacity' is also listed in /proc/sched_debug. +'cpu_power' is ideally same for all cpus (1024) when they are idle and running +at the same frequency. 'cpu_power' of a cpu can be scaled down from its ideal +value to reflect reduced frequency it is operating at and also to reflect the +amount of cpu bandwidth consumed by real-time tasks executing on it. +'cpu_power' metric is used by scheduler to decide task load distribution among +cpus. CPUs with low 'cpu_power' will be assigned less task load compared to cpus +with higher 'cpu_power' + +============ +4. CPU POWER +============ + +The HMP scheduler extensions currently depend on an architecture-specific driver +to provide runtime information on cpu power. In the absence of an +architecture-specific driver, the scheduler will resort to using the +max_possible_capacity metric of a cpu as a measure of its power. + +================ +5. HMP SCHEDULER +================ + +For normal (SCHED_OTHER/fair class) tasks there are three paths in the +scheduler which these HMP extensions affect. The task wakeup path, the +load balancer, and the scheduler tick are each modified. + +Real-time and stop-class tasks are served by different code +paths. These will be discussed separately. + +Prior to delving further into the algorithm and implementation however +some definitions are required. 
+ +*** 5.1 Classification of Tasks and CPUs + +With the extensions described thus far, the following information is +available to the HMP scheduler: + +- per-task CPU demand information from either Per-Entity Load Tracking + (PELT) or the window-based algorithm described above + +- a power value for each frequency supported by each CPU via the API + described in section 4 + +- current CPU frequency, maximum CPU frequency (may be throttled by at + runtime due to thermal conditions), maximum possible CPU frequency supported + by hardware + +- data previously maintained within the scheduler such as the number + of currently runnable tasks on each CPU + +Combined with tunable parameters, this information can be used to classify +both tasks and CPUs to aid in the placement of tasks. + +- big task + + A big task is one that exerts a CPU demand too high for a particular + CPU to satisfy. The scheduler will attempt to find a CPU with more + capacity for such a task. + + The definition of "big" is specific to a task *and* a CPU. A task + may be considered big on one CPU in the system and not big on + another if the first CPU has less capacity than the second. + + What task demand is "too high" for a particular CPU? One obvious + answer would be a task demand which, as measured by PELT or + window-based load tracking, matches or exceeds the capacity of that + CPU. A task which runs on a CPU for a long time, for example, might + meet this criteria as it would report 100% demand of that CPU. It + may be desirable however to classify tasks which use less than 100% + of a particular CPU as big so that the task has some "headroom" to grow + without its CPU bandwidth getting capped and its performance requirements + not being met. This task demand is therefore a tunable parameter: + + /proc/sys/kernel/sched_upmigrate + + This value is a percentage. If a task consumes more than this much of a + particular CPU, that CPU will be considered too small for the task. 
The task + will thus be seen as a "big" task on the cpu and will reflect in nr_big_tasks + statistics maintained for that cpu. Note that certain tasks (whose nice + value exceeds SCHED_UPMIGRATE_MIN_NICE value or those that belong to a cgroup + whose upmigrate_discourage flag is set) will never be classified as big tasks + despite their high demand. + + As the load scale factor is calculated against current fmax, it gets boosted + when a lower capacity CPU is restricted to run at lower fmax. The task + demand is inflated in this scenario and the task upmigrates early to the + maximum capacity CPU. Hence this threshold is auto-adjusted by a factor + equal to max_possible_frequency/current_frequency of a lower capacity CPU. + This adjustment happens only when the lower capacity CPU frequency is + restricted. The same adjustment is applied to the downmigrate threshold + as well. + + When the frequency restriction is relaxed, the previous values are restored. + sched_up_down_migrate_auto_update macro defined in kernel/sched/core.c + controls this auto-adjustment behavior and it is enabled by default. + + If the adjusted upmigrate threshold exceeds the window size, it is clipped to + the window size. If the adjusted downmigrate threshold decreases the difference + between the upmigrate and downmigrate, it is clipped to a value such that the + difference between the modified and the original thresholds is same. + +- spill threshold + + Tasks will normally be placed on lowest power-cost cluster where they can fit. + This could result in power-efficient cluster becoming overcrowded when there + are "too" many low-demand tasks. Spill threshold provides a spill over + criteria, wherein low-demand task are allowed to be placed on idle or + busy cpus in high-performance cluster. 
+ + Scheduler will avoid placing a task on a cpu if it can result in cpu exceeding + its spill threshold, which is defined by two tunables: + + /proc/sys/kernel/sched_spill_nr_run (default: 10) + /proc/sys/kernel/sched_spill_load (default : 100%) + + A cpu is considered to be above its spill level if it already has 10 tasks or + if the sum of task load (scaled in reference to given cpu) and + rq->cumulative_runnable_avg exceeds 'sched_spill_load'. + +- power band + + The scheduler may be faced with a tradeoff between power and performance when + placing a task. If the scheduler sees two CPUs which can accommodate a task: + + CPU 1, power cost of 20, load of 10 + CPU 2, power cost of 10, load of 15 + + It is not clear what the right choice of CPU is. The HMP scheduler + offers the sched_powerband_limit tunable to determine how this + situation should be handled. When the power delta between two CPUs + is less than sched_powerband_limit_pct, load will be prioritized as + the deciding factor as to which CPU is selected. If the power delta + between two CPUs exceeds that, the lower power CPU is considered to + be in a different "band" and it is selected, despite perhaps having + a higher current task load. + +*** 5.2 select_best_cpu() + +CPU placement decisions for a task at its wakeup or creation time are the +most important decisions made by the HMP scheduler. This section will describe +the call flow and algorithm used in detail. + +The primary entry point for a task wakeup operation is try_to_wake_up(), +located in kernel/sched/core.c. This function relies on select_task_rq() to +determine the target CPU for the waking task. For fair-class (SCHED_OTHER) +tasks, that request will be routed to select_task_rq_fair() in +kernel/sched/fair.c. As part of these scheduler extensions a hook has been +inserted into the top of that function. If HMP scheduling is enabled the normal +scheduling behavior will be replaced by a call to select_best_cpu(). 
This +function, select_best_cpu(), represents the heart of the HMP scheduling +algorithm described in this document. Note that select_best_cpu() is also +invoked for a task being created. + +The behavior of select_best_cpu() depends on several factors such as boost +setting, choice of several tunables and on task demand. + +**** 5.2.1 Boost + +The task placement policy changes significantly when scheduler boost is in +effect. When boost is in effect the scheduler ignores the power cost of +placing tasks on CPUs. Instead it figures out the load on each CPU and then +places task on the least loaded CPU. If the load of two or more CPUs is the +same (generally when CPUs are idle) the task prefers to go highest capacity +CPU in the system. + +A further enhancement during boost is the scheduler's early detection feature. +While boost is in effect the scheduler checks for the presence of tasks that +have been runnable for over some period of time within the tick. For such +tasks the scheduler informs the governor of imminent need for high frequency. +If there exists a task on the runqueue at the tick that has been runnable +for greater than SCHED_EARLY_DETECTION_DURATION amount of time, it notifies +the governor with a fabricated load of the full window at the highest +frequency. The fabricated load is maintained until the task is no longer +runnable or until the next tick. + +Boost can be set via either /proc/sys/kernel/sched_boost or by invoking +kernel API sched_set_boost(). + + int sched_set_boost(int enable); + +Once turned on, boost will remain in effect until it is explicitly turned off. +To allow for boost to be controlled by multiple external entities (application +or kernel module) at same time, boost setting is reference counted. This means +that two applications can turn on boost and the effect of boost is eliminated +only after both applications have turned off boost. boost_refcount variable +represents this reference count. 
+ +**** 5.2.2 task_will_fit() + +The overall goal of select_best_cpu() is to place a task on the least power +cluster where it can "fit" i.e where its cpu usage shall be below the capacity +offered by cluster. Criteria for a task to be considered as fitting in a cluster +is: + + i) A low-priority task, whose nice value is greater than + SCHED_UPMIGRATE_MIN_NICE or whose cgroup has its + upmigrate_discourage flag set, is considered to be fitting in all clusters, + irrespective of their capacity and task's cpu demand. + + ii) All tasks are considered to fit in highest capacity cluster. + + iii) Task demand scaled in reference to the given cluster should be less than a + threshold. See section on load_scale_factor to know more about how task + demand is scaled in reference to a given cpu (cluster). The threshold used + is normally sched_upmigrate. Its possible for a task's demand to exceed + sched_upmigrate threshold in reference to a cluster when its upmigrated to + higher capacity cluster. To prevent it from coming back immediately to + lower capacity cluster, the task is not considered to "fit" on its earlier + cluster until its demand has dropped below sched_downmigrate in reference + to that earlier cluster. sched_downmigrate thus provides for some + hysteresis control. + + +**** 5.2.3 Factors affecting select_best_cpu() + +Behavior of select_best_cpu() is further controlled by several tunables and +synchronous nature of wakeup. + +a. /proc/sys/kernel/sched_cpu_high_irqload + A cpu whose irq load is greater than this threshold will not be + considered eligible for placement. This threshold value in expressed in + nanoseconds scale, with default threshold being 10000000 (10ms). See + notes on sched_cpu_high_irqload tunable to understand how irq load on a + cpu is measured. + +b. 
Synchronous nature of wakeup + Synchronous wakeup is a hint to scheduler that the task issuing wakeup + (i.e task currently running on cpu where wakeup is being processed by + scheduler) will "soon" relinquish CPU. A simple example is two tasks + communicating with each other using a pipe structure. When reader task + blocks waiting for data, its woken by writer task after it has written + data to pipe. Writer task usually blocks waiting for reader task to + consume data in pipe (which may not have any more room for writes). + + Synchronous wakeup is accounted for by adjusting load of a cpu to not + include load of currently running task. As a result, a cpu that has only + one runnable task and which is currently processing synchronous wakeup + will be considered idle. + +c. PF_WAKE_UP_IDLE + Any task with this flag set will be woken up to an idle cpu (if one is + available) independent of sched_prefer_idle flag setting, its demand and + synchronous nature of wakeup. Similarly idle cpu is preferred during + wakeup for any task that does not have this flag set but is being woken + by a task with PF_WAKE_UP_IDLE flag set. For simplicity, we will use the + term "PF_WAKE_UP_IDLE wakeup" to signify wakeups involving a task with + PF_WAKE_UP_IDLE flag set. + +d. /proc/sys/kernel/sched_select_prev_cpu_us + This threshold controls whether task placement goes through fast path or + not. If task's wakeup time since last sleep is short there are high + chances that it's better to place the task on its previous CPU. This + reduces task placement latency, cache miss and number of migrations. + Default value of sched_select_prev_cpu_us is 2000 (2ms). This can be + turned off by setting it to 0. + +e. /proc/sys/kernel/sched_short_burst_ns + This threshold controls whether a task is considered as "short-burst" + or not. "short-burst" tasks are eligible for packing to avoid overhead + associated with waking up an idle CPU. 
"non-idle" CPUs which are not
+ loaded with IRQs and can accommodate the waking task without exceeding
+ spill limits are considered. The ties are broken with load followed
+ by previous CPU. This tunable does not affect cluster selection.
+ It only affects CPU selection in a given cluster. This packing is
+ skipped for tasks that are eligible for "wake-up-idle" and "boost".
+
+**** 5.2.4 Wakeup Logic for Task "p"
+
+Wakeup task placement logic is as follows:
+
+1) Eliminate CPUs with high irq load based on sched_cpu_high_irqload tunable.
+
+2) Eliminate CPUs where either the task does not fit or CPUs where placement
+will result in exceeding the spill threshold tunables. CPUs eliminated at this
+stage will be considered as backup choices in case none of the CPUs get past
+this stage.
+
+3) Find out and return the least power CPU that satisfies all conditions above.
+
+4) If two or more CPUs are projected to have the same power, break ties in the
+following preference order:
+ a) The CPU is the task's previous CPU.
+ b) The CPU is in the same cluster as the task's previous CPU.
+ c) The CPU has the least load
+
+The placement logic described above does not apply when PF_WAKE_UP_IDLE is set
+for either the waker task or the wakee task. Instead the scheduler chooses the
+most power efficient idle CPU.
+
+5) If no CPU is found after step 2, resort to backup CPU selection logic
+whereby the CPU with highest amount of spare capacity is selected.
+
+6) If none of the CPUs have any spare capacity, return the task's previous
+CPU.
+
+*** 5.3 Scheduler Tick
+
+Every CPU is interrupted periodically to let kernel update various statistics
+and possibly preempt the currently running task in favor of a waiting task. This
+periodicity, determined by CONFIG_HZ value, is set at 10ms. There are various
+optimizations by which a CPU however can skip taking these interrupts (ticks).
+A cpu going idle for considerable time is one such case.
+ +HMP scheduler extensions brings in a change in processing of tick +(scheduler_tick()) that can result in task migration. In case the currently +running task on a cpu belongs to fair_sched class, a check is made if it needs +to be migrated. Possible reasons for migrating task could be: + +a) A big task is running on a power-efficient cpu and a high-performance cpu is +available (idle) to service it + +b) A task is starving on a CPU with high irq load. + +c) A task with upmigration discouraged is running on a performance cluster. +See notes on 'cpu.upmigrate_discourage'. + +In case the test for migration turns out positive (which is expected to be rare +event), a candidate cpu is identified for task migration. To avoid multiple task +migrations to the same candidate cpu(s), identification of candidate cpu is +serialized via global spinlock (migration_lock). + +*** 5.4 Load Balancer + +Load balance is a key functionality of scheduler that strives to distribute task +across available cpus in a "fair" manner. Most of the complexity associated with +this feature involves balancing fair_sched class tasks. Changes made to load +balance code serve these goals: + +1. Restrict flow of tasks from power-efficient cpus to high-performance cpu. + Provide a spill-over threshold, defined in terms of number of tasks + (sched_spill_nr_run) and cpu demand (sched_spill_load), beyond which tasks + can spill over from power-efficient cpu to high-performance cpus. + +2. Allow idle power-efficient cpus to pick up extra load from over-loaded + performance-efficient cpu + +3. Allow idle high-performance cpu to pick up big tasks from power-efficient cpu + +*** 5.5 Real Time Tasks + +Minimal changes introduced in treatment of real-time tasks by HMP scheduler +aims at preferring scheduling of real-time tasks on cpus with low load on +a power efficient cluster. 
+ +Prior to HMP scheduler, the fast-path cpu selection for placing a real-time task +(at wakeup) is its previous cpu, provided the currently running task on its +previous cpu is not a real-time task or a real-time task with lower priority. +Failing this, cpu selection in slow-path involves building a list of candidate +cpus where the waking real-time task will be of highest priority and thus can be +run immediately. The first cpu from this candidate list is chosen for the waking +real-time task. Much of the premise for this simple approach is the assumption +that real-time tasks often execute for very short intervals and thus the focus +is to place them on a cpu where they can be run immediately. + +HMP scheduler brings in a change which avoids fast-path and always resorts to +slow-path. Further cpu with lowest load in a power efficient cluster from +candidate list of cpus is chosen as cpu for placing waking real-time task. + +- PF_WAKE_UP_IDLE + +Idle cpu is preferred for any waking task that has this flag set in its +'task_struct.flags' field. Further idle cpu is preferred for any task woken by +such tasks. PF_WAKE_UP_IDLE flag of a task is inherited by it's children. It can +be modified for a task in two ways: + + > kernel-space interface + set_wake_up_idle() needs to be called in the context of a task + to set or clear its PF_WAKE_UP_IDLE flag. + + > user-space interface + /proc/[pid]/sched_wake_up_idle file needs to be written to for + setting or clearing PF_WAKE_UP_IDLE flag for a given task + +===================== +6. FREQUENCY GUIDANCE +===================== + +As mentioned in the introduction section the scheduler is in a unique +position to assist with the determination of CPU frequency. Because +the scheduler now maintains an estimate of per-task CPU demand, task +activity can be tracked, aggregated and provided to the CPUfreq +governor as a replacement for simple CPU busy time. 
+ +Two of the most popular CPUfreq governors, interactive and ondemand, +utilize a window-based approach for measuring CPU busy time. This +works well with the window-based load tracking scheme previously +described. The following APIs are provided to allow the CPUfreq +governor to query busy time from the scheduler instead of using the +basic CPU busy time value derived via get_cpu_idle_time_us() and +get_cpu_iowait_time_us() APIs. + + int sched_set_window(u64 window_start, unsigned int window_size) + + This API is invoked by governor at initialization time or whenever + window size is changed. 'window_size' argument (in jiffy units) + indicates the size of window to be used. The first window of size + 'window_size' is set to begin at jiffy 'window_start' + + -EINVAL is returned if per-entity load tracking is in use rather + than window-based load tracking, otherwise a success value of 0 + is returned. + + int sched_get_busy(int cpu) + + Returns the busy time for the given CPU in the most recent + complete window. The value returned is microseconds of busy + time at fmax of given CPU. + +The values returned by sched_get_busy() take a bit of explanation, +both in what they mean and also how they are derived. + +*** 6.1 Per-CPU Window-Based Stats + +The scheduler tracks two separate types of quantities on a per CPU basis. +The first type has to deal with the aggregate load on a CPU and the second +type deals with top-tasks on that same CPU. We will first proceed to explain +what is maintained as part of each type of statistics and then provide the +connection between these two types of statistics at the end. + +First lets describe the HMP scheduler extensions to track the aggregate load +seen on each CPU. This is done using the same windows that the task demand +is tracked with (which is in turn set by the governor when frequency guidance +is in use). 
There are four quantities maintained for each CPU by the HMP +scheduler for tracking CPU load: + + curr_runnable_sum: aggregate demand from all tasks which executed during + the current (not yet completed) window + + prev_runnable_sum: aggregate demand from all tasks which executed during + the most recent completed window + + nt_curr_runnable_sum: aggregate demand from all 'new' tasks which executed + during the current (not yet completed) window + + nt_prev_runnable_sum: aggregate demand from all 'new' tasks which executed + during the most recent completed window. + +When the scheduler is updating a task's window-based stats it also +updates these values. Like per-task window-based demand these +quantities are normalized against the max possible frequency and max +efficiency (instructions per cycle) in the system. If an update occurs +and a window rollover is observed, curr_runnable_sum is copied into +prev_runnable_sum before being reset to 0. The sched_get_busy() API +returns prev_runnable_sum, scaled to the efficiency and fmax of given +CPU. The same applies to nt_curr_runnable_sum and nt_prev_runnable_sum. + +A 'new' task is defined as a task whose number of active windows since fork is +less than SCHED_NEW_TASK_WINDOWS. An active window is defined as a window +where a task was observed to be runnable. + +Moving on the second type of statistics; top-tasks, the scheduler tracks a list +of top tasks per CPU. A top-task is defined as the task that runs the most in a +given window on that CPU. This includes task that ran on that CPU through out +the window or were migrated to that CPU prior to window expiration. It does not +include tasks that were migrated away from that CPU prior to window expiration. + +To track top tasks, we first realize that there is no strict need to maintain +the task struct itself as long as we know the load exerted by the top task. 
We
+also realize that to maintain top tasks on every CPU we have to track the
+execution of every single task that runs during the window. The load associated
+with a task needs to be migrated when the task migrates from one CPU to another.
+When the top task migrates away, we need to locate the second top task and so
+on.
+
+Given the above realizations, we use hashmaps to track top task load both
+for the current and the previous window. This hashmap is implemented as an array
+of fixed size. The key of the hashmap is given by
+task_execution_time_in_a_window / array_size. The size of the array (number of
+buckets in the hashmap) dictates the load granularity of each bucket. The value
+stored in each bucket is a refcount of all the tasks that executed long enough
+to be in that bucket. This approach has a few benefits. Firstly, any top task
+stats update now takes O(1) time. While task migration is also O(1), it does
+still involve going through up to the size of the array to find the second top
+task. We optimize this search by using bitmaps. The next set bit in the bitmap
+gives the position of the second top task in our hashmap.
+
+Secondly, and more importantly, not having to store the task struct itself
+saves a lot of memory usage in that 1) there is no need to retrieve task structs
+later causing cache misses and 2) we don't have to unnecessarily hold up task
+memory for up to 2 full windows by calling get_task_struct() after a task exits.
+
+Given the motivation above, here is a list of quantities tracked as part of
+per CPU task top-tasks management
+
+ top_tasks[NUM_TRACKED_WINDOWS] - Hashmap of top-task load for the current and
+ previous window
+
+ BITMAP_ARRAY(top_tasks_bitmap) - Two bitmaps for the current and previous
+ windows corresponding to the top-task
+ hashmap.
+
+ load_subs[NUM_TRACKED_WINDOWS] - An array of load subtractions to be carried
+ out from curr/prev_runnable_sums for each CPU
+ prior to reporting load to the governor.
The + purpose for this will be explained later in + the section pertaining to the TASK_MIGRATE + event. The type struct load_subtractions, + stores the value of the subtraction along + with the window start value for the window + for which the subtraction has to take place. + + + curr_table - Indication of which index of the array points to the current + window. + + curr_top - The top task on a CPU at any given moment in the current window + + prev_top - The top task on a CPU in the previous window + + +*** 6.2 Per-task window-based stats + +Corresponding to curr_runnable_sum and prev_runnable_sum, two counters are +maintained per-task + +curr_window_cpu - represents task's contribution to cpu busy time on + various CPUs in the current window + +prev_window_cpu - represents task's contribution to cpu busy time on + various CPUs in the previous window + +curr_window - represents the sum of all entries in curr_window_cpu + +prev_window - represents the sum of all entries in prev_window_cpu + +"cpu demand" of a task includes its execution time and can also include its +wait time. 'SCHED_FREQ_ACCOUNT_WAIT_TIME' controls whether task's wait +time is included in its CPU load counters or not. + +Curr_runnable_sum counter of a cpu is derived from curr_window_cpu[cpu] +counter of various tasks that ran on it in its most recent window. + +*** 6.3 Effect of various task events + +We now consider various events and how they affect above mentioned counters. + +PICK_NEXT_TASK + This represents beginning of execution for a task. Provided the task + refers to a non-idle task, a portion of task's wait time that + corresponds to the current window being tracked on a cpu is added to + task's curr_window_cpu and curr_window counter, provided + SCHED_FREQ_ACCOUNT_WAIT_TIME is set. The same quantum is also added to + cpu's curr_runnable_sum counter. 
The remaining portion, which + corresponds to task's wait time in previous window is added to task's + prev_window, prev_window_cpu and cpu's prev_runnable_sum counters. + + CPUs top_tasks hashmap is updated if needed with the new information. + Any previous entries in the hashmap are deleted and newer entries are + created. The top_tasks_bitmap reflects the updated state of the + hashmap. If the top task for the current and/or previous window has + changed, curr_top and prev_top are updated accordingly. + +PUT_PREV_TASK + This represents end of execution of a time-slice for a task, where the + task could refer to a cpu's idle task also. In case the task is non-idle + or (in case of task being idle with cpu having non-zero rq->nr_iowait + count and sched_io_is_busy =1), a portion of task's execution time, that + corresponds to current window being tracked on a cpu is added to task's + curr_window_cpu and curr_window counter and also to cpu's + curr_runnable_sum counter. Portion of task's execution that corresponds + to the previous window is added to task's prev_window, prev_window_cpu + and cpu's prev_runnable_sum counters. + + CPUs top_tasks hashmap is updated if needed with the new information. + Any previous entries in the hashmap are deleted and newer entries are + created. The top_tasks_bitmap reflects the updated state of the + hashmap. If the top task for the current and/or previous window has + changed, curr_top and prev_top are updated accordingly. + +TASK_UPDATE + This event is called on a cpu's currently running task and hence + behaves effectively as PUT_PREV_TASK. Task continues executing after + this event, until PUT_PREV_TASK event occurs on the task (during + context switch). + +TASK_WAKE + This event signifies a task waking from sleep. 
Since many windows + could have elapsed since the task went to sleep, its + curr_window_cpu/curr_window and prev_window_cpu/prev_window are + updated to reflect task's demand in the most recent and its previous + window that is being tracked on a cpu. Updated stats will trigger + the same book-keeping for top-tasks as other events. + +TASK_MIGRATE + This event signifies task migration across cpus. It is invoked on the + task prior to being moved. Thus at the time of this event, the task + can be considered to be in "waiting" state on src_cpu. In that way + this event reflects actions taken under PICK_NEXT_TASK (i.e its + wait time is added to task's curr/prev_window/_cpu counters as well + as src_cpu's curr/prev_runnable_sum counters, provided + SCHED_FREQ_ACCOUNT_WAIT_TIME is non-zero). + + After that update, we make a distinction between intra-cluster and + inter-cluster migrations for further book-keeping. + + For intra-cluster migrations, we simply remove the entry for the task + in the top_tasks hashmap from the source CPU and add the entry to the + destination CPU. The top_tasks_bitmap, curr_top and prev_top are + updated accordingly. We then find the second top-task top in our + top_tasks hashmap for both the current and previous window and set + curr_top and prev_top to their new values. + + For inter-cluster migrations we have a much more complicated scheme. + Firstly we add to the destination CPU's curr/prev_runnable_sum + the tasks curr/prev_window. Note we add the sum and not the + contribution any individual CPU. This is because when a tasks migrates + across clusters, we need the new cluster to ramp up to the appropriate + frequency given the task's total execution summed up across all CPUs + in the previous cluster. + + Secondly the src_cpu's curr/prev_runnable_sum are reduced by task's + curr/prev_window_cpu values. 
+ + Thirdly, we need to walk all the CPUs in the cluster and subtract from + each CPU's curr/prev_runnable_sum the task's respective + curr/prev_window_cpu values. However, subtracting load from each of + the source CPUs is not trivial, as it would require all runqueue + locks to be held. To get around this we introduce a deferred load + subtraction mechanism whereby subtracting load from each of the source + CPUs is deferred until an opportune moment. This opportune moment is + when the governor comes asking the scheduler for load. At that time, all + necessary runqueue locks are already held. + + There are a few cases to consider when doing deferred subtraction. Since + we are not holding all runqueue locks other CPUs in the source cluster + can be in a different window than the source CPU where the task is + migrating from. + + Case 1: + Other CPU in the source cluster is in the same window. No special + consideration. + + Case 2: + Other CPU in the source cluster is ahead by 1 window. In this + case, we will be doing redundant updates to subtraction load for the + prev window. There is no way to avoid this redundant update though, + without holding the rq lock. + + Case 3: + Other CPU in the source cluster is trailing by 1 window In this + case, we might end up overwriting old data for that CPU. But this is not + a problem as when the other CPU calls update_task_ravg() it will move to + the same window. This relies on maintaining synchronized windows between + CPUs, which is true today. + + To achieve all the above, we simple add the task's curr/prev_window_cpu + contributions to the per CPU load_subtractions array. These load + subtractions are subtracted from the respective CPU's + curr/prev_runnable_sums before the governor queries CPU load. Once this + is complete, the scheduler sets all curr/prev_window_cpu contributions + of the task to 0 for all CPUs in the source cluster. 
The destination + CPUs's curr/prev_window_cpu is updated with the tasks curr/prev_window + sums. + + Finally, we must deal with frequency aggregation. When frequency + aggregation is in effect, there is little point in dealing with per CPU + footprint since the load of all related tasks have to be reported on a + single CPU. Therefore when a task enters a related group we clear out + all per CPU contributions and add it to the task CPU's cpu_time struct. + From that point onwards we stop managing per CPU contributions upon + inter cluster migrations since that work is redundant. Finally when a + task exits a related group we must walk every CPU in reset all CPU + contributions. We then set the task CPU contribution to the respective + curr/prev sum values and add that sum to the task CPU rq runnable sum. + + Top-task management is the same as in the case of intra-cluster + migrations. + +IRQ_UPDATE + This event signifies end of execution of an interrupt handler. This + event results in update of cpu's busy time counters, curr_runnable_sum + and prev_runnable_sum, provided cpu was idle. When sched_io_is_busy = 0, + only the interrupt handling time is added to cpu's curr_runnable_sum and + prev_runnable_sum counters. When sched_io_is_busy = 1, the event mirrors + actions taken under TASK_UPDATED event i.e time since last accounting + of idle task's cpu usage is added to cpu's curr_runnable_sum and + prev_runnable_sum counters. No update is needed for top-tasks in this + case. + +*** 6.4 Tying it all together + +Now the scheduler maintains two independent quantities for load reporing 1) CPU +load as represented by prev_runnable_sum and 2) top-tasks. The reported load +is governed by tunable sched_freq_reporting_policy. The default choice is +FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK. In other words: + +max(prev_runnable_sum, top_task load) + +Let's explain the rationale behind the choice. CPU load tracks the exact amount +of execution observed on a CPU. 
This is close to the quantity that the vanilla
+governor used to track. It offers the advantages of no load over-reporting that
+our earlier load fixup mechanisms had to deal with. It then also tackles the
+partial picture problem by keeping track of tasks that might be migrating across
+CPUs leaving a small footprint on each CPU. Since we maintain one top task per
+CPU, we can handle as many top tasks as the number of CPUs in a cluster. We
+might miss a few cases where the combined load of the top and non-top tasks on
+a CPU is more representative of the true load. However, those cases have been
+deemed too rare and have little impact on overall load/frequency behavior.
+
+
+===========
+7. TUNABLES
+===========
+
+*** 7.1 sched_spill_load
+
+Appears at: /proc/sys/kernel/sched_spill_load
+
+Default value: 100
+
+CPU selection criteria for fair-sched class tasks is the lowest power cpu where
+they can fit. When the most power-efficient cpu where a task can fit is
+overloaded (aggregate demand of tasks currently queued on it exceeds
+sched_spill_load), a task can be placed on a higher-performance cpu, even though
+the task strictly doesn't need one.
+
+*** 7.2 sched_spill_nr_run
+
+Appears at: /proc/sys/kernel/sched_spill_nr_run
+
+Default value: 10
+
+The intent of this tunable is similar to sched_spill_load, except it applies to
+nr_running count of a cpu. A task can spill over to a higher-performance cpu
+when the most power-efficient cpu where it can normally fit has more tasks than
+sched_spill_nr_run.
+
+*** 7.3 sched_upmigrate
+
+Appears at: /proc/sys/kernel/sched_upmigrate
+
+Default value: 80
+
+This tunable is a percentage. If a task consumes more than this much
+of a CPU, the CPU is considered too small for the task and the
+scheduler will try to find a bigger CPU to place the task on.
+
+*** 7.4 sched_init_task_load
+
+Appears at: /proc/sys/kernel/sched_init_task_load
+
+Default value: 15
+
+This tunable is a percentage.
When a task is first created it has no +history, so the task load tracking mechanism cannot determine a +historical load value to assign to it. This tunable specifies the +initial load value for newly created tasks. Also see Sec 2.8 on per-task +'initial task load' attribute. + +*** 7.5 sched_ravg_hist_size + +Appears at: /proc/sys/kernel/sched_ravg_hist_size + +Default value: 5 + +This tunable controls the number of samples used from task's sum_history[] +array for determination of its demand. + +*** 7.6 sched_window_stats_policy + +Appears at: /proc/sys/kernel/sched_window_stats_policy + +Default value: 2 + +This tunable controls the policy in how window-based load tracking +calculates an overall demand value based on the windows of CPU +utilization it has collected for a task. + +Possible values for this tunable are: +0: Just use the most recent window sample of task activity when calculating + task demand. +1: Use the maximum value of first M samples found in task's cpu demand + history (sum_history[] array), where M = sysctl_sched_ravg_hist_size +2: Use the maximum of (the most recent window sample, average of first M + samples), where M = sysctl_sched_ravg_hist_size +3. Use average of first M samples, where M = sysctl_sched_ravg_hist_size + +*** 7.7 sched_ravg_window + +Appears at: kernel command line argument + +Default value: 10000000 (10ms, units of tunable are nanoseconds) + +This specifies the duration of each window in window-based load +tracking. By default each window is 10ms long. This quantity must +currently be set at boot time on the kernel command line (or the +default value of 10ms can be used). + +*** 7.8 RAVG_HIST_SIZE + +Appears at: compile time only (see RAVG_HIST_SIZE in include/linux/sched.h) + +Default value: 5 + +This macro specifies the number of windows the window-based load +tracking mechanism maintains per task. 
If default values are used for +both this and sched_ravg_window then a total of 50ms of task history +would be maintained in 5 10ms windows. + +*** 7.9 sched_freq_inc_notify + +Appears at: /proc/sys/kernel/sched_freq_inc_notify + +Default value: 10 * 1024 * 1024 (10 Ghz) + +When scheduler detects that cur_freq of a cluster is insufficient to meet +demand, it sends notification to governor, provided (freq_required - cur_freq) +exceeds sched_freq_inc_notify, where freq_required is the frequency calculated +by scheduler to meet current task demand. Note that sched_freq_inc_notify is +specified in kHz units. + +*** 7.10 sched_freq_dec_notify + +Appears at: /proc/sys/kernel/sched_freq_dec_notify + +Default value: 10 * 1024 * 1024 (10 Ghz) + +When scheduler detects that cur_freq of a cluster is far greater than what is +needed to serve current task demand, it will send notification to governor. +More specifically, notification is sent when (cur_freq - freq_required) +exceeds sched_freq_dec_notify, where freq_required is the frequency calculated +by scheduler to meet current task demand. Note that sched_freq_dec_notify is +specified in kHz units. + +*** 7.11 sched_cpu_high_irqload + +Appears at: /proc/sys/kernel/sched_cpu_high_irqload + +Default value: 10000000 (10ms) + +The scheduler keeps a decaying average of the amount of irq and softirq activity +seen on each CPU within a ten millisecond window. Note that this "irqload" +(reported in the sched_cpu_load_* tracepoint) will be higher than the typical load +in a single window since every time the window rolls over, the value is decayed +by some fraction and then added to the irq/softirq time spent in the next +window. + +When the irqload on a CPU exceeds the value of this tunable, the CPU is no +longer eligible for placement. This will affect the task placement logic +described above, causing the scheduler to try and steer tasks away from +the CPU. 
+
+*** 7.12 cpu.upmigrate_discourage
+
+Default value : 0
+
+This is a cgroup attribute supported by the cpu resource controller. It normally
+appears at [root_cpu]/[name1]/../[name2]/cpu.upmigrate_discourage. Here
+"root_cpu" is the mount point for cgroup (cpu resource control) filesystem
+and name1, name2 etc are names of cgroups that form a hierarchy.
+
+Setting this flag to 1 discourages upmigration for all tasks of a cgroup. High
+demand tasks of such a cgroup will never be classified as big tasks and hence
+not upmigrated. Any task of the cgroup is allowed to upmigrate only under
+overcommitted scenario. See notes on sched_spill_nr_run and sched_spill_load for
+how overcommitment threshold is defined.
+
+*** 7.13 sched_static_cpu_pwr_cost
+
+Default value: 0
+
+Appears at /sys/devices/system/cpu/cpu/sched_static_cpu_pwr_cost
+
+This is the power cost associated with bringing an idle CPU out of low power
+mode. It ignores the actual C-state that a CPU may be in and assumes the
+worst case power cost of the highest C-state. It is a means of biasing task
+placement away from idle CPUs when necessary. It can be defined per CPU,
+however, a more appropriate usage is to define the same value for every CPU
+within a cluster and possibly have differing values between clusters as
+needed.
+
+
+*** 7.14 sched_static_cluster_pwr_cost
+
+Default value: 0
+
+Appears at /sys/devices/system/cpu/cpu/sched_static_cluster_pwr_cost
+
+This is the power cost associated with bringing an idle cluster out of low
+power mode. It ignores the actual D-state that a cluster may be in and assumes
+the worst case power cost of the highest D-state. It is a means of biasing task
+placement away from idle clusters when necessary.
+
+*** 7.15 sched_restrict_cluster_spill
+
+Default value: 0
+
+Appears at /proc/sys/kernel/sched_restrict_cluster_spill
+
+This tunable can be used to restrict tasks spilling to the higher capacity
+(higher power) cluster.
When this tunable is enabled, + +- Restrict the higher capacity cluster pulling tasks from the lower capacity +cluster in the load balance path. The restriction is lifted if all of the CPUS +in the lower capacity cluster are above spill. The power cost is used to break +the ties if the capacity of clusters are same for applying this restriction. + +- The current CPU selection algorithm for RT tasks looks for the least loaded +CPU across all clusters. When this tunable is enabled, the RT tasks are +restricted to the lowest possible power cluster. + + +*** 7.16 sched_downmigrate + +Appears at: /proc/sys/kernel/sched_downmigrate + +Default value: 60 + +This tunable is a percentage. It exists to control hysteresis. Lets say a task +migrated to a high-performance cpu when it crossed 80% demand on a +power-efficient cpu. We don't let it come back to a power-efficient cpu until +its demand *in reference to the power-efficient cpu* drops less than 60% +(sched_downmigrate). + + +*** 7.17 sched_small_wakee_task_load + +Appears at: /proc/sys/kernel/sched_small_wakee_task_load + +Default value: 10 + +This tunable is a percentage. Configure the maximum demand of small wakee task. +Sync wakee tasks which have demand less than sched_small_wakee_task_load are +categorized as small wakee tasks. Scheduler places small wakee tasks on the +waker's cluster. + + +*** 7.18 sched_big_waker_task_load + +Appears at: /proc/sys/kernel/sched_big_waker_task_load + +Default value: 25 + +This tunable is a percentage. Configure the minimum demand of big sync waker +task. Scheduler places small wakee tasks woken up by big sync waker on the +waker's cluster. + +*** 7.19 sched_prefer_sync_wakee_to_waker + +Appears at: /proc/sys/kernel/sched_prefer_sync_wakee_to_waker + +Default value: 0 + +The default sync wakee policy has a preference to select an idle CPU in the +waker cluster compared to the waker CPU running only 1 task. 
By selecting +an idle CPU, it eliminates the chance of waker migrating to a different CPU +after the wakee preempts it. This policy is also not susceptible to the +incorrect "sync" usage, i.e. the waker does not go to sleep after waking up +the wakee. + +However, the LPM exit latency associated with an idle CPU outweighs the above +benefits on some targets. When this knob is turned on, the waker CPU is +selected if it has only 1 runnable task. + +*** 7.20 sched_freq_reporting_policy + +Appears at: /proc/sys/kernel/sched_freq_reporting_policy + +Default value: 0 + +This dictates what the load reporting policy to the governor should be. The +default value is FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK. Other values include +FREQ_REPORT_CPU_LOAD which only reports CPU load to the governor and +FREQ_REPORT_TOP_TASK which only reports the load of the top task on a CPU +to the governor. + +========================= +8. HMP SCHEDULER TRACE POINTS +========================= + +*** 8.1 sched_enq_deq_task + +Logged when a task is either enqueued or dequeued on a CPU's run queue. 
+ + -0 [004] d.h4 12700.711665: sched_enq_deq_task: cpu=4 enqueue comm=powertop pid=13227 prio=120 nr_running=1 cpu_load=0 rt_nr_running=0 affine=ff demand=13364423 + +- cpu: the CPU that the task is being enqueued on to or dequeued off of +- enqueue/dequeue: whether this was an enqueue or dequeue event +- comm: name of task +- pid: PID of task +- prio: priority of task +- nr_running: number of runnable tasks on this CPU +- cpu_load: current priority-weighted load on the CPU (note, this is *not* + the same as CPU utilization or a metric tracked by PELT/window-based tracking) +- rt_nr_running: number of real-time processes running on this CPU +- affine: CPU affinity mask in hex for this task (so ff is a task eligible to + run on CPUs 0-7) +- demand: window-based task demand computed based on selected policy (recent, + max, or average) (ns) + +*** 8.2 sched_task_load + +Logged when selecting the best CPU to run the task (select_best_cpu()). + +sched_task_load: 4004 (adbd): demand=698425 boost=0 reason=0 sync=0 need_idle=0 best_cpu=0 latency=103177 + +- demand: window-based task demand computed based on selected policy (recent, + max, or average) (ns) +- boost: whether boost is in effect +- reason: reason we are picking a new CPU: + 0: no migration - selecting a CPU for a wakeup or new task wakeup + 1: move to big CPU (migration) + 2: move to little CPU (migration) + 3: move to low irq load CPU (migration) +- sync: is the nature synchronous in nature +- need_idle: is an idle CPU required for this task based on PF_WAKE_UP_IDLE +- best_cpu: The CPU selected by the select_best_cpu() function for placement +- latency: The execution time of the function select_best_cpu() + +*** 8.3 sched_cpu_load_* + +Logged when selecting the best CPU to run a task (select_best_cpu() for fair +class tasks, find_lowest_rq_hmp() for RT tasks) and load balancing +(update_sg_lb_stats()). 
+ +-0 [004] d.h3 12700.711541: sched_cpu_load_*: cpu 0 idle 1 nr_run 0 nr_big 0 lsf 1119 capacity 1024 cr_avg 0 irqload 3301121 fcur 729600 fmax 1459200 power_cost 5 cstate 2 temp 38 + +- cpu: the CPU being described +- idle: boolean indicating whether the CPU is idle +- nr_run: number of tasks running on CPU +- nr_big: number of BIG tasks running on CPU +- lsf: load scale factor - multiply normalized load by this factor to determine + how much load task will exert on CPU +- capacity: capacity of CPU (based on max possible frequency and efficiency) +- cr_avg: cumulative runnable average, instantaneous sum of the demand (either + PELT or window-based) of all the runnable tasks on a CPU (ns) +- irqload: decaying average of irq activity on CPU (ns) +- fcur: current CPU frequency (kHz) +- fmax: max CPU frequency (but not maximum _possible_ frequency) (KHz) +- power_cost: cost of running this CPU at the current frequency +- cstate: current cstate of CPU +- temp: current temperature of the CPU + +The power_cost value above differs in how it is calculated depending on the +callsite of this tracepoint. The select_best_cpu() call to this tracepoint +finds the minimum frequency required to satisfy the existing load on the CPU +as well as the task being placed, and returns the power cost of that frequency. +The load balance and real time task placement paths use a fixed frequency +(highest frequency common to all CPUs for load balancing, minimum +frequency of the CPU for real time task placement). + +*** 8.4 sched_update_task_ravg + +Logged when window-based stats are updated for a task. The update may happen +for a variety of reasons, see section 2.5, "Task Events." 
+ +rcu_preempt-7 [000] d..3 262857.738888: sched_update_task_ravg: wc 262857521127957 ws 262857490000000 delta 31127957 event PICK_NEXT_TASK cpu 0 cur_freq 291055 cur_pid 7 task 9309 (kworker/u16:0) ms 262857520627280 delta 500677 demand 282196 sum 156201 irqtime 0 pred_demand 267103 rq_cs 478718 rq_ps 0 cur_window 78433 (78433 0 0 0 0 0 0 0 ) prev_window 146430 (0 146430 0 0 0 0 0 0 ) nt_cs 0 nt_ps 0 active_wins 149 grp_cs 0 grp_ps 0, grp_nt_cs 0, grp_nt_ps: 0 curr_top 6 prev_top 2 + +- wc: wallclock, output of sched_clock(), monotonically increasing time since + boot (will roll over in 585 years) (ns) +- ws: window start, time when the current window started (ns) +- delta: time since the window started (wc - ws) (ns) +- event: What event caused this trace event to occur (see section 2.5 for more + details) +- cpu: which CPU the task is running on +- cur_freq: CPU's current frequency in KHz +- curr_pid: PID of the current running task (current) +- task: PID and name of task being updated +- ms: mark start - timestamp of the beginning of a segment of task activity, + either sleeping or runnable/running (ns) +- delta: time since last event within the window (wc - ms) (ns) +- demand: task demand computed based on selected policy (recent, max, or + average) (ns) +- sum: the task's run time during current window scaled by frequency and + efficiency (ns) +- irqtime: length of interrupt activity (ns). A non-zero irqtime is seen + when an idle cpu handles interrupts, the time for which needs to be + accounted as cpu busy time +- cs: curr_runnable_sum of cpu (ns). See section 6.1 for more details of this + counter. +- ps: prev_runnable_sum of cpu (ns). See section 6.1 for more details of this + counter. +- cur_window: cpu demand of task in its most recently tracked window summed up + across all CPUs (ns). This is followed by a list of contributions on each + individual CPU. +- prev_window: cpu demand of task in its previous window summed up across + all CPUs (ns). 
This is followed by a list of contributions on each individual + CPU. +- nt_cs: curr_runnable_sum of a cpu for new tasks only (ns). +- nt_ps: prev_runnable_sum of a cpu for new tasks only (ns). +- active_wins: No. of active windows since task statistics were initialized +- grp_cs: curr_runnable_sum for colocated tasks. This is independent from + cs described above. The addition of these two fields give the total CPU + load for the most recent window +- grp_ps: prev_runnable_sum for colocated tasks. This is independent from + ps described above. The addition of these two fields give the total CPU + load for the previous window. +- grp_nt_cs: curr_runnable_sum of a cpu for grouped new tasks only (ns). +- grp_nt_ps: prev_runnable_sum for a cpu for grouped new tasks only (ns). +- curr_top: index of the top task in the top_tasks array in the current + window for a CPU. +- prev_top: index of the top task in the top_tasks array in the previous + window for a CPU + +*** 8.5 sched_update_history + +Logged when update_task_ravg() is accounting task activity into one or +more windows that have completed. This may occur more than once for a +single call into update_task_ravg(). A task that ran for 24ms spanning +four 10ms windows (the last 2ms of window 1, all of windows 2 and 3, +and the first 2ms of window 4) would result in two calls into +update_history() from update_task_ravg(). The first call would record activity +in completed window 1 and second call would record activity for windows 2 and 3 +together (samples will be 2 in second call). + +-0 [004] d.h4 12700.711489: sched_update_history: 13227 (powertop): runtime 13364423 samples 1 event TASK_WAKE demand 13364423 (hist: 13364423 9871252 2236009 6162476 10282078) cpu 4 nr_big 0 + +- runtime: task cpu demand in recently completed window(s). This value is scaled + to max_possible_freq and max_possible_efficiency. This value is pushed into + task's demand history array. 
The number of windows to which runtime applies is + provided by samples field. +- samples: Number of samples (windows), each having value of runtime, that is + recorded in task's demand history array. +- event: What event caused this trace event to occur (see section 2.5 for more + details) - PUT_PREV_TASK, PICK_NEXT_TASK, TASK_WAKE, TASK_MIGRATE, + TASK_UPDATE +- demand: task demand computed based on selected policy (recent, max, or + average) (ns) +- hist: last 5 windows of history for the task with the most recent window + listed first +- cpu: CPU the task is associated with +- nr_big: number of big tasks on the CPU + +*** 8.6 sched_reset_all_windows_stats + +Logged when key parameters controlling window-based statistics collection are +changed. This event signifies that all window-based statistics for tasks and +cpus are being reset. Changes to below attributes result in such a reset: + +* sched_ravg_window (See Sec 2) +* sched_window_stats_policy (See Sec 2.4) +* sched_ravg_hist_size (See Sec 7.11) + +-0 [004] d.h4 12700.711489: sched_reset_all_windows_stats: time_taken 1123 window_start 0 window_size 0 reason POLICY_CHANGE old_val 0 new_val 1 + +- time_taken: time taken for the reset function to complete (ns) +- window_start: Beginning of first window following change to window size (ns) +- window_size: Size of window. Non-zero if window-size is changing (in ticks) +- reason: Reason for reset of statistics. +- old_val: Old value of variable, change of which is triggering reset +- new_val: New value of variable, change of which is triggering reset + +*** 8.7 sched_migration_update_sum + +Logged when a task is migrating to another cpu. + +-0 [000] d..8 5020.404137: sched_migration_update_sum: cpu 0: cs 471278 ps 902463 nt_cs 0 nt_ps 0 pid 2645 + +- cpu: cpu, away from which or to which, task is migrating +- cs: curr_runnable_sum of cpu (ns). See Sec 6.1 for more details of this + counter. +- ps: prev_runnable_sum of cpu (ns). 
See Sec 6.1 for more details of this + counter. +- nt_cs: nt_curr_runnable_sum of cpu (ns). See Sec 6.1 for more details of + this counter. +- nt_ps: nt_prev_runnable_sum of cpu (ns). See Sec 6.1 for more details of + this counter +- pid: PID of migrating task + +*** 8.8 sched_get_busy + +Logged when scheduler is returning busy time statistics for a cpu. + +<...>-4331 [003] d.s3 313.700108: sched_get_busy: cpu 3 load 19076 new_task_load 0 early 0 + + +- cpu: cpu, for which busy time statistic (prev_runnable_sum) is being + returned (ns) +- load: corresponds to prev_runnable_sum (ns), scaled to fmax of cpu +- new_task_load: corresponds to nt_prev_runnable_sum to fmax of cpu +- early: A flag indicating whether the scheduler is passing regular load or early detection load + 0 - regular load + 1 - early detection load + +*** 8.9 sched_freq_alert + +Logged when scheduler is alerting cpufreq governor about need to change +frequency + +-0 [004] d.h4 12700.711489: sched_freq_alert: cpu 0 old_load=XXX new_load=YYY + +- cpu: cpu in cluster that has highest load (prev_runnable_sum) +- old_load: cpu busy time last reported to governor. This is load scaled in + reference to max_possible_freq and max_possible_efficiency. +- new_load: recent cpu busy time. This is load scaled in + reference to max_possible_freq and max_possible_efficiency. + +*** 8.10 sched_set_boost + +Logged when boost settings are being changed + +-0 [004] d.h4 12700.711489: sched_set_boost: ref_count=1 + +- ref_count: A non-zero value indicates boost is in effect + +======================== +9. 
Device Tree bindings +======================== + +The device tree bindings for the HMP scheduler are defined in +Documentation/devicetree/bindings/sched/sched_hmp.txt diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index df2c100c79fd..6475fa234065 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -88,7 +88,6 @@ show up in /proc/sys/kernel: - sysctl_writes_strict - tainted - threads-max -- unprivileged_bpf_disabled - unknown_nmi_panic - watchdog - watchdog_thresh @@ -945,26 +944,6 @@ available RAM pages threads-max is reduced accordingly. ============================================================== -unprivileged_bpf_disabled: - -Writing 1 to this entry will disable unprivileged calls to bpf(); -once disabled, calling bpf() without CAP_SYS_ADMIN will return --EPERM. Once set to 1, this can't be cleared from the running kernel -anymore. - -Writing 2 to this entry will also disable unprivileged calls to bpf(), -however, an admin can still change this setting later on, if needed, by -writing 0 or 1 to this entry. - -If BPF_UNPRIV_DEFAULT_OFF is enabled in the kernel config, then this -entry will default to 2 instead of 0. - - 0 - Unprivileged calls to bpf() are enabled - 1 - Unprivileged calls to bpf() are disabled without recovery - 2 - Unprivileged calls to bpf() are disabled - -============================================================== - unknown_nmi_panic: The value in this file affects behavior of handling NMI. When the diff --git a/Documentation/vm/z3fold.txt b/Documentation/vm/z3fold.txt new file mode 100644 index 000000000000..38e4dac810b6 --- /dev/null +++ b/Documentation/vm/z3fold.txt @@ -0,0 +1,26 @@ +z3fold +------ + +z3fold is a special purpose allocator for storing compressed pages. +It is designed to store up to three compressed pages per physical page. +It is a zbud derivative which allows for higher compression +ratio keeping the simplicity and determinism of its predecessor. 
+ +The main differences between z3fold and zbud are: +* unlike zbud, z3fold allows for up to PAGE_SIZE allocations +* z3fold can hold up to 3 compressed pages in its page +* z3fold doesn't export any API itself and is thus intended to be used + via the zpool API. + +To keep the determinism and simplicity, z3fold, just like zbud, always +stores an integral number of compressed pages per page, but it can store +up to 3 pages unlike zbud which can store at most 2. Therefore the +compression ratio goes to around 2.7x while zbud's one is around 1.7x. + +Unlike zbud (but like zsmalloc for that matter) z3fold_alloc() does not +return a dereferenceable pointer. Instead, it returns an unsigned long +handle which encodes actual location of the allocated object. + +Keeping effective compression ratio close to zsmalloc's, z3fold doesn't +depend on MMU enabled and provides more predictable reclaim behavior +which makes it a better fit for small and response-critical systems. diff --git a/Makefile b/Makefile index aa2d1ae1f90b..0d3b34ecf2b9 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 302 +SUBLEVEL = 247 EXTRAVERSION = NAME = Blurry Fish Butt @@ -349,7 +349,7 @@ OBJDUMP = $(CROSS_COMPILE)objdump AWK = awk GENKSYMS = scripts/genksyms/genksyms INSTALLKERNEL := installkernel -DEPMOD = depmod +DEPMOD = /sbin/depmod PERL = perl PYTHON = python CHECK = sparse @@ -743,11 +743,12 @@ KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare) # See modpost pattern 2 KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,) KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior) -endif +else # These warnings generated too much noise in a regular build. 
# Use make W=1 to enable them (see scripts/Makefile.extrawarn) KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) +endif KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) ifdef CONFIG_FRAME_POINTER @@ -786,13 +787,6 @@ ifdef CONFIG_FUNCTION_TRACER ifndef CC_FLAGS_FTRACE CC_FLAGS_FTRACE := -pg endif -ifdef CONFIG_FTRACE_MCOUNT_RECORD - # gcc 5 supports generating the mcount tables directly - ifeq ($(call cc-option-yn,-mrecord-mcount),y) - CC_FLAGS_FTRACE += -mrecord-mcount - export CC_USING_RECORD_MCOUNT := 1 - endif -endif export CC_FLAGS_FTRACE ifdef CONFIG_HAVE_FENTRY CC_USING_FENTRY := $(call cc-option, -mfentry -DCC_USING_FENTRY) @@ -863,6 +857,12 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=strict-prototypes) # Prohibit date/time macros, which would make the build non-deterministic KBUILD_CFLAGS += $(call cc-option,-Werror=date-time) +# ensure -fcf-protection is disabled when using retpoline as it is +# incompatible with -mindirect-branch=thunk-extern +ifdef CONFIG_RETPOLINE +KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) +endif + # use the deterministic mode of AR if available KBUILD_ARFLAGS := $(call ar-option,D) @@ -1101,7 +1101,7 @@ endef define filechk_version.h (echo \#define LINUX_VERSION_CODE $(shell \ - expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 255); \ + expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \ echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';) endef diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild index f5cb9b28e5fb..74252220cb46 100644 --- a/arch/alpha/include/asm/Kbuild +++ b/arch/alpha/include/asm/Kbuild @@ -3,7 +3,6 @@ generic-y += clkdev.h generic-y += cputime.h generic-y += exec.h -generic-y += export.h generic-y += hash.h generic-y += irq_work.h generic-y += mcs_spinlock.h diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index e55a5e6ab460..355aec0867f4 100644 --- a/arch/alpha/include/asm/io.h +++ 
b/arch/alpha/include/asm/io.h @@ -60,7 +60,7 @@ extern inline void set_hae(unsigned long new_hae) * Change virtual addresses to physical addresses and vv. */ #ifdef USE_48_BIT_KSEG -static inline unsigned long virt_to_phys(volatile void *address) +static inline unsigned long virt_to_phys(void *address) { return (unsigned long)address - IDENT_ADDR; } @@ -70,7 +70,7 @@ static inline void * phys_to_virt(unsigned long address) return (void *) (address + IDENT_ADDR); } #else -static inline unsigned long virt_to_phys(volatile void *address) +static inline unsigned long virt_to_phys(void *address) { unsigned long phys = (unsigned long)address; @@ -111,7 +111,7 @@ static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page) extern unsigned long __direct_map_base; extern unsigned long __direct_map_size; -static inline unsigned long __deprecated virt_to_bus(volatile void *address) +static inline unsigned long __deprecated virt_to_bus(void *address) { unsigned long phys = virt_to_phys(address); unsigned long bus = phys + __direct_map_base; diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 89413a29cb07..c0ddbbf73400 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -341,17 +341,45 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ * Complex access routines */ -extern long __copy_user(void *to, const void *from, long len); +/* This little bit of silliness is to get the GP loaded for a function + that ordinarily wouldn't. Otherwise we could have it done by the macro + directly, which can be optimized the linker. 
*/ +#ifdef MODULE +#define __module_address(sym) "r"(sym), +#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym +#else +#define __module_address(sym) +#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp" +#endif + +extern void __copy_user(void); + +extern inline long +__copy_tofrom_user_nocheck(void *to, const void *from, long len) +{ + register void * __cu_to __asm__("$6") = to; + register const void * __cu_from __asm__("$7") = from; + register long __cu_len __asm__("$0") = len; + + __asm__ __volatile__( + __module_call(28, 3, __copy_user) + : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) + : __module_address(__copy_user) + "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) + : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); + + return __cu_len; +} -#define __copy_to_user(to, from, n) \ -({ \ - __chk_user_ptr(to); \ - __copy_user((__force void *)(to), (from), (n)); \ +#define __copy_to_user(to, from, n) \ +({ \ + __chk_user_ptr(to); \ + __copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \ }) -#define __copy_from_user(to, from, n) \ -({ \ - __chk_user_ptr(from); \ - __copy_user((to), (__force void *)(from), (n)); \ +#define __copy_from_user(to, from, n) \ +({ \ + __chk_user_ptr(from); \ + __copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \ }) #define __copy_to_user_inatomic __copy_to_user @@ -361,22 +389,35 @@ extern inline long copy_to_user(void __user *to, const void *from, long n) { if (likely(__access_ok((unsigned long)to, n, get_fs()))) - n = __copy_user((__force void *)to, from, n); + n = __copy_tofrom_user_nocheck((__force void *)to, from, n); return n; } extern inline long copy_from_user(void *to, const void __user *from, long n) { - long res = n; if (likely(__access_ok((unsigned long)from, n, get_fs()))) - res = __copy_from_user_inatomic(to, from, n); - if (unlikely(res)) - memset(to + (n - res), 0, res); - return res; + n = __copy_tofrom_user_nocheck(to, (__force void *)from, n); + else + memset(to, 
0, n); + return n; } -extern long __clear_user(void __user *to, long len); +extern void __do_clear_user(void); + +extern inline long +__clear_user(void __user *to, long len) +{ + register void __user * __cl_to __asm__("$6") = to; + register long __cl_len __asm__("$0") = len; + __asm__ __volatile__( + __module_call(28, 2, __do_clear_user) + : "=r"(__cl_len), "=r"(__cl_to) + : __module_address(__do_clear_user) + "0"(__cl_len), "1"(__cl_to) + : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); + return __cl_len; +} extern inline long clear_user(void __user *to, long len) @@ -386,6 +427,9 @@ clear_user(void __user *to, long len) return len; } +#undef __module_address +#undef __module_call + #define user_addr_max() \ (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile index 8ce13d7a2ad3..3ecac0106c8a 100644 --- a/arch/alpha/kernel/Makefile +++ b/arch/alpha/kernel/Makefile @@ -8,7 +8,7 @@ ccflags-y := -Wno-sign-compare obj-y := entry.o traps.o process.o osf_sys.o irq.o \ irq_alpha.o signal.o setup.o ptrace.o time.o \ - systbls.o err_common.o io.o + alpha_ksyms.o systbls.o err_common.o io.o obj-$(CONFIG_VGA_HOSE) += console.o obj-$(CONFIG_SMP) += smp.o diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c new file mode 100644 index 000000000000..f4c7ab6f43b0 --- /dev/null +++ b/arch/alpha/kernel/alpha_ksyms.c @@ -0,0 +1,102 @@ +/* + * linux/arch/alpha/kernel/alpha_ksyms.c + * + * Export the alpha-specific functions that are needed for loadable + * modules. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +/* these are C runtime functions with special calling conventions: */ +extern void __divl (void); +extern void __reml (void); +extern void __divq (void); +extern void __remq (void); +extern void __divlu (void); +extern void __remlu (void); +extern void __divqu (void); +extern void __remqu (void); + +EXPORT_SYMBOL(alpha_mv); +EXPORT_SYMBOL(callback_getenv); +EXPORT_SYMBOL(callback_setenv); +EXPORT_SYMBOL(callback_save_env); + +/* platform dependent support */ +EXPORT_SYMBOL(strcat); +EXPORT_SYMBOL(strcpy); +EXPORT_SYMBOL(strlen); +EXPORT_SYMBOL(strncpy); +EXPORT_SYMBOL(strncat); +EXPORT_SYMBOL(strchr); +EXPORT_SYMBOL(strrchr); +EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(__memcpy); +EXPORT_SYMBOL(__memset); +EXPORT_SYMBOL(___memset); +EXPORT_SYMBOL(__memsetw); +EXPORT_SYMBOL(__constant_c_memset); +EXPORT_SYMBOL(copy_page); +EXPORT_SYMBOL(clear_page); + +EXPORT_SYMBOL(alpha_read_fp_reg); +EXPORT_SYMBOL(alpha_read_fp_reg_s); +EXPORT_SYMBOL(alpha_write_fp_reg); +EXPORT_SYMBOL(alpha_write_fp_reg_s); + +/* Networking helper routines. */ +EXPORT_SYMBOL(csum_tcpudp_magic); +EXPORT_SYMBOL(ip_compute_csum); +EXPORT_SYMBOL(ip_fast_csum); +EXPORT_SYMBOL(csum_partial_copy_nocheck); +EXPORT_SYMBOL(csum_partial_copy_from_user); +EXPORT_SYMBOL(csum_ipv6_magic); + +#ifdef CONFIG_MATHEMU_MODULE +extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long); +extern long (*alpha_fp_emul) (unsigned long pc); +EXPORT_SYMBOL(alpha_fp_emul_imprecise); +EXPORT_SYMBOL(alpha_fp_emul); +#endif + +/* + * The following are specially called from the uaccess assembly stubs. + */ +EXPORT_SYMBOL(__copy_user); +EXPORT_SYMBOL(__do_clear_user); + +/* + * SMP-specific symbols. 
+ */ + +#ifdef CONFIG_SMP +EXPORT_SYMBOL(_atomic_dec_and_lock); +#endif /* CONFIG_SMP */ + +/* + * The following are special because they're not called + * explicitly (the C compiler or assembler generates them in + * response to division operations). Fortunately, their + * interface isn't gonna change any time soon now, so it's OK + * to leave it out of version control. + */ +# undef memcpy +# undef memset +EXPORT_SYMBOL(__divl); +EXPORT_SYMBOL(__divlu); +EXPORT_SYMBOL(__divq); +EXPORT_SYMBOL(__divqu); +EXPORT_SYMBOL(__reml); +EXPORT_SYMBOL(__remlu); +EXPORT_SYMBOL(__remq); +EXPORT_SYMBOL(__remqu); +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memset); +EXPORT_SYMBOL(memchr); diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h index 8c6516025efb..f54bdf658cd0 100644 --- a/arch/alpha/kernel/machvec_impl.h +++ b/arch/alpha/kernel/machvec_impl.h @@ -144,11 +144,9 @@ else beforehand. Fine. We'll do it ourselves. */ #if 0 #define ALIAS_MV(system) \ - struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv"))); \ - EXPORT_SYMBOL(alpha_mv); + struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv"))); #else #define ALIAS_MV(system) \ - asm(".global alpha_mv\nalpha_mv = " #system "_mv"); \ - EXPORT_SYMBOL(alpha_mv); + asm(".global alpha_mv\nalpha_mv = " #system "_mv"); #endif #endif /* GENERIC */ diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 4811e54069fc..b20af76f12c1 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -115,7 +115,6 @@ unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE; #ifdef CONFIG_ALPHA_GENERIC struct alpha_machine_vector alpha_mv; -EXPORT_SYMBOL(alpha_mv); #endif #ifndef alpha_using_srm diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 1543b571bd99..2f24447fef92 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -584,7 +584,7 @@ void smp_send_stop(void) { cpumask_t to_whom; - cpumask_copy(&to_whom, 
cpu_online_mask); + cpumask_copy(&to_whom, cpu_possible_mask); cpumask_clear_cpu(smp_processor_id(), &to_whom); #ifdef DEBUG_IPI_MSG if (hard_smp_processor_id() != boot_cpu_id) diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile index a80815960364..59660743237c 100644 --- a/arch/alpha/lib/Makefile +++ b/arch/alpha/lib/Makefile @@ -20,8 +20,12 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \ checksum.o \ csum_partial_copy.o \ $(ev67-y)strlen.o \ - stycpy.o \ - styncpy.o \ + $(ev67-y)strcat.o \ + strcpy.o \ + $(ev67-y)strncat.o \ + strncpy.o \ + $(ev6-y)stxcpy.o \ + $(ev6-y)stxncpy.o \ $(ev67-y)strchr.o \ $(ev67-y)strrchr.o \ $(ev6-y)memchr.o \ @@ -42,20 +46,11 @@ AFLAGS___remqu.o = -DREM AFLAGS___divlu.o = -DDIV -DINTSIZE AFLAGS___remlu.o = -DREM -DINTSIZE -$(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \ - $(src)/$(ev6-y)divide.S FORCE - $(call if_changed_rule,as_o_S) - -# There are direct branches between {str*cpy,str*cat} and stx*cpy. -# Ensure the branches are within range by merging these objects. 
- -LDFLAGS_stycpy.o := -r -LDFLAGS_styncpy.o := -r - -$(obj)/stycpy.o: $(obj)/strcpy.o $(obj)/$(ev67-y)strcat.o \ - $(obj)/$(ev6-y)stxcpy.o FORCE - $(call if_changed,ld) - -$(obj)/styncpy.o: $(obj)/strncpy.o $(obj)/$(ev67-y)strncat.o \ - $(obj)/$(ev6-y)stxncpy.o FORCE - $(call if_changed,ld) +$(obj)/__divqu.o: $(obj)/$(ev6-y)divide.S + $(cmd_as_o_S) +$(obj)/__remqu.o: $(obj)/$(ev6-y)divide.S + $(cmd_as_o_S) +$(obj)/__divlu.o: $(obj)/$(ev6-y)divide.S + $(cmd_as_o_S) +$(obj)/__remlu.o: $(obj)/$(ev6-y)divide.S + $(cmd_as_o_S) diff --git a/arch/alpha/lib/callback_srm.S b/arch/alpha/lib/callback_srm.S index 6093addc931a..8804bec2c644 100644 --- a/arch/alpha/lib/callback_srm.S +++ b/arch/alpha/lib/callback_srm.S @@ -3,7 +3,6 @@ */ #include -#include .text #define HWRPB_CRB_OFFSET 0xc0 @@ -93,10 +92,6 @@ CALLBACK(reset_env, CCB_RESET_ENV, 4) CALLBACK(save_env, CCB_SAVE_ENV, 1) CALLBACK(pswitch, CCB_PSWITCH, 3) CALLBACK(bios_emul, CCB_BIOS_EMUL, 5) - -EXPORT_SYMBOL(callback_getenv) -EXPORT_SYMBOL(callback_setenv) -EXPORT_SYMBOL(callback_save_env) .data __alpha_using_srm: # For use by bootpheader diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c index 65197c3c0845..199f6efa83fa 100644 --- a/arch/alpha/lib/checksum.c +++ b/arch/alpha/lib/checksum.c @@ -50,7 +50,6 @@ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, (__force u64)saddr + (__force u64)daddr + (__force u64)sum + ((len + proto) << 8)); } -EXPORT_SYMBOL(csum_tcpudp_magic); __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, @@ -149,7 +148,6 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { return (__force __sum16)~do_csum(iph,ihl*4); } -EXPORT_SYMBOL(ip_fast_csum); /* * computes the checksum of a memory block at buff, length len, @@ -184,4 +182,3 @@ __sum16 ip_compute_csum(const void *buff, int len) { return (__force __sum16)~from64to16(do_csum(buff,len)); } -EXPORT_SYMBOL(ip_compute_csum); diff --git a/arch/alpha/lib/clear_page.S b/arch/alpha/lib/clear_page.S 
index 263d7393c0e7..a221ae266e29 100644 --- a/arch/alpha/lib/clear_page.S +++ b/arch/alpha/lib/clear_page.S @@ -3,7 +3,7 @@ * * Zero an entire page. */ -#include + .text .align 4 .global clear_page @@ -37,4 +37,3 @@ clear_page: nop .end clear_page - EXPORT_SYMBOL(clear_page) diff --git a/arch/alpha/lib/clear_user.S b/arch/alpha/lib/clear_user.S index 006f469fef73..8860316c1957 100644 --- a/arch/alpha/lib/clear_user.S +++ b/arch/alpha/lib/clear_user.S @@ -8,8 +8,22 @@ * right "bytes left to zero" value (and that it is updated only _after_ * a successful copy). There is also some rather minor exception setup * stuff. + * + * NOTE! This is not directly C-callable, because the calling semantics + * are different: + * + * Inputs: + * length in $0 + * destination address in $6 + * exception pointer in $7 + * return address in $28 (exceptions expect it there) + * + * Outputs: + * bytes left to copy in $0 + * + * Clobbers: + * $1,$2,$3,$4,$5,$6 */ -#include /* Allow an exception for an insn; exit if we get one. */ #define EX(x,y...) \ @@ -23,63 +37,62 @@ .set noreorder .align 4 - .globl __clear_user - .ent __clear_user - .frame $30, 0, $26 + .globl __do_clear_user + .ent __do_clear_user + .frame $30, 0, $28 .prologue 0 $loop: and $1, 3, $4 # e0 : beq $4, 1f # .. e1 : -0: EX( stq_u $31, 0($16) ) # e0 : zero one word +0: EX( stq_u $31, 0($6) ) # e0 : zero one word subq $0, 8, $0 # .. e1 : subq $4, 1, $4 # e0 : - addq $16, 8, $16 # .. e1 : + addq $6, 8, $6 # .. e1 : bne $4, 0b # e1 : unop # : 1: bic $1, 3, $1 # e0 : beq $1, $tail # .. e1 : -2: EX( stq_u $31, 0($16) ) # e0 : zero four words +2: EX( stq_u $31, 0($6) ) # e0 : zero four words subq $0, 8, $0 # .. e1 : - EX( stq_u $31, 8($16) ) # e0 : + EX( stq_u $31, 8($6) ) # e0 : subq $0, 8, $0 # .. e1 : - EX( stq_u $31, 16($16) ) # e0 : + EX( stq_u $31, 16($6) ) # e0 : subq $0, 8, $0 # .. e1 : - EX( stq_u $31, 24($16) ) # e0 : + EX( stq_u $31, 24($6) ) # e0 : subq $0, 8, $0 # .. 
e1 : subq $1, 4, $1 # e0 : - addq $16, 32, $16 # .. e1 : + addq $6, 32, $6 # .. e1 : bne $1, 2b # e1 : $tail: bne $2, 1f # e1 : is there a tail to do? - ret $31, ($26), 1 # .. e1 : + ret $31, ($28), 1 # .. e1 : -1: EX( ldq_u $5, 0($16) ) # e0 : +1: EX( ldq_u $5, 0($6) ) # e0 : clr $0 # .. e1 : nop # e1 : mskqh $5, $0, $5 # e0 : - EX( stq_u $5, 0($16) ) # e0 : - ret $31, ($26), 1 # .. e1 : + EX( stq_u $5, 0($6) ) # e0 : + ret $31, ($28), 1 # .. e1 : -__clear_user: - and $17, $17, $0 - and $16, 7, $4 # e0 : find dest misalignment +__do_clear_user: + and $6, 7, $4 # e0 : find dest misalignment beq $0, $zerolength # .. e1 : addq $0, $4, $1 # e0 : bias counter and $1, 7, $2 # e1 : number of bytes in tail srl $1, 3, $1 # e0 : beq $4, $loop # .. e1 : - EX( ldq_u $5, 0($16) ) # e0 : load dst word to mask back in + EX( ldq_u $5, 0($6) ) # e0 : load dst word to mask back in beq $1, $oneword # .. e1 : sub-word store? - mskql $5, $16, $5 # e0 : take care of misaligned head - addq $16, 8, $16 # .. e1 : - EX( stq_u $5, -8($16) ) # e0 : + mskql $5, $6, $5 # e0 : take care of misaligned head + addq $6, 8, $6 # .. e1 : + EX( stq_u $5, -8($6) ) # e0 : addq $0, $4, $0 # .. e1 : bytes left -= 8 - misalignment subq $1, 1, $1 # e0 : subq $0, 8, $0 # .. e1 : @@ -87,15 +100,14 @@ __clear_user: unop # : $oneword: - mskql $5, $16, $4 # e0 : + mskql $5, $6, $4 # e0 : mskqh $5, $2, $5 # e0 : or $5, $4, $5 # e1 : - EX( stq_u $5, 0($16) ) # e0 : + EX( stq_u $5, 0($6) ) # e0 : clr $0 # .. e1 : $zerolength: $exception: - ret $31, ($26), 1 # .. e1 : + ret $31, ($28), 1 # .. e1 : - .end __clear_user - EXPORT_SYMBOL(__clear_user) + .end __do_clear_user diff --git a/arch/alpha/lib/copy_page.S b/arch/alpha/lib/copy_page.S index 2ee0bd0508c5..9f3b97459cc6 100644 --- a/arch/alpha/lib/copy_page.S +++ b/arch/alpha/lib/copy_page.S @@ -3,7 +3,7 @@ * * Copy an entire page. 
*/ -#include + .text .align 4 .global copy_page @@ -47,4 +47,3 @@ copy_page: nop .end copy_page - EXPORT_SYMBOL(copy_page) diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S index 159f1b7e6e49..6f3fab9eb434 100644 --- a/arch/alpha/lib/copy_user.S +++ b/arch/alpha/lib/copy_user.S @@ -9,10 +9,23 @@ * contains the right "bytes left to copy" value (and that it is updated * only _after_ a successful copy). There is also some rather minor * exception setup stuff.. + * + * NOTE! This is not directly C-callable, because the calling semantics are + * different: + * + * Inputs: + * length in $0 + * destination address in $6 + * source address in $7 + * return address in $28 + * + * Outputs: + * bytes left to copy in $0 + * + * Clobbers: + * $1,$2,$3,$4,$5,$6,$7 */ -#include - /* Allow an exception for an insn; exit if we get one. */ #define EXI(x,y...) \ 99: x,##y; \ @@ -34,59 +47,58 @@ .ent __copy_user __copy_user: .prologue 0 - and $18,$18,$0 - and $16,7,$3 + and $6,7,$3 beq $0,$35 beq $3,$36 subq $3,8,$3 .align 4 $37: - EXI( ldq_u $1,0($17) ) - EXO( ldq_u $2,0($16) ) - extbl $1,$17,$1 - mskbl $2,$16,$2 - insbl $1,$16,$1 + EXI( ldq_u $1,0($7) ) + EXO( ldq_u $2,0($6) ) + extbl $1,$7,$1 + mskbl $2,$6,$2 + insbl $1,$6,$1 addq $3,1,$3 bis $1,$2,$1 - EXO( stq_u $1,0($16) ) + EXO( stq_u $1,0($6) ) subq $0,1,$0 - addq $16,1,$16 - addq $17,1,$17 + addq $6,1,$6 + addq $7,1,$7 beq $0,$41 bne $3,$37 $36: - and $17,7,$1 + and $7,7,$1 bic $0,7,$4 beq $1,$43 beq $4,$48 - EXI( ldq_u $3,0($17) ) + EXI( ldq_u $3,0($7) ) .align 4 $50: - EXI( ldq_u $2,8($17) ) + EXI( ldq_u $2,8($7) ) subq $4,8,$4 - extql $3,$17,$3 - extqh $2,$17,$1 + extql $3,$7,$3 + extqh $2,$7,$1 bis $3,$1,$1 - EXO( stq $1,0($16) ) - addq $17,8,$17 + EXO( stq $1,0($6) ) + addq $7,8,$7 subq $0,8,$0 - addq $16,8,$16 + addq $6,8,$6 bis $2,$2,$3 bne $4,$50 $48: beq $0,$41 .align 4 $57: - EXI( ldq_u $1,0($17) ) - EXO( ldq_u $2,0($16) ) - extbl $1,$17,$1 - mskbl $2,$16,$2 - insbl $1,$16,$1 + EXI( ldq_u $1,0($7) 
) + EXO( ldq_u $2,0($6) ) + extbl $1,$7,$1 + mskbl $2,$6,$2 + insbl $1,$6,$1 bis $1,$2,$1 - EXO( stq_u $1,0($16) ) + EXO( stq_u $1,0($6) ) subq $0,1,$0 - addq $16,1,$16 - addq $17,1,$17 + addq $6,1,$6 + addq $7,1,$7 bne $0,$57 br $31,$41 .align 4 @@ -94,27 +106,40 @@ $43: beq $4,$65 .align 4 $66: - EXI( ldq $1,0($17) ) + EXI( ldq $1,0($7) ) subq $4,8,$4 - EXO( stq $1,0($16) ) - addq $17,8,$17 + EXO( stq $1,0($6) ) + addq $7,8,$7 subq $0,8,$0 - addq $16,8,$16 + addq $6,8,$6 bne $4,$66 $65: beq $0,$41 - EXI( ldq $2,0($17) ) - EXO( ldq $1,0($16) ) + EXI( ldq $2,0($7) ) + EXO( ldq $1,0($6) ) mskql $2,$0,$2 mskqh $1,$0,$1 bis $2,$1,$2 - EXO( stq $2,0($16) ) + EXO( stq $2,0($6) ) bis $31,$31,$0 $41: $35: -$exitin: $exitout: - ret $31,($26),1 + ret $31,($28),1 + +$exitin: + /* A stupid byte-by-byte zeroing of the rest of the output + buffer. This cures security holes by never leaving + random kernel data around to be copied elsewhere. */ + + mov $0,$1 +$101: + EXO ( ldq_u $2,0($6) ) + subq $1,1,$1 + mskbl $2,$6,$2 + EXO ( stq_u $2,0($6) ) + addq $6,1,$6 + bgt $1,$101 + ret $31,($28),1 .end __copy_user -EXPORT_SYMBOL(__copy_user) diff --git a/arch/alpha/lib/csum_ipv6_magic.S b/arch/alpha/lib/csum_ipv6_magic.S index e74b4544b0cc..2c2acb96deb6 100644 --- a/arch/alpha/lib/csum_ipv6_magic.S +++ b/arch/alpha/lib/csum_ipv6_magic.S @@ -12,7 +12,6 @@ * added by Ivan Kokshaysky */ -#include .globl csum_ipv6_magic .align 4 .ent csum_ipv6_magic @@ -114,4 +113,3 @@ csum_ipv6_magic: ret # .. 
e1 : .end csum_ipv6_magic - EXPORT_SYMBOL(csum_ipv6_magic) diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c index b4ff3b683bcd..5675dca8dbb1 100644 --- a/arch/alpha/lib/csum_partial_copy.c +++ b/arch/alpha/lib/csum_partial_copy.c @@ -374,7 +374,6 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len, } return (__force __wsum)checksum; } -EXPORT_SYMBOL(csum_partial_copy_from_user); __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) @@ -387,4 +386,3 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) set_fs(oldfs); return checksum; } -EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/alpha/lib/dec_and_lock.c b/arch/alpha/lib/dec_and_lock.c index 4221b40167ee..f9f5fe830e9f 100644 --- a/arch/alpha/lib/dec_and_lock.c +++ b/arch/alpha/lib/dec_and_lock.c @@ -7,7 +7,6 @@ #include #include -#include asm (".text \n\ .global _atomic_dec_and_lock \n\ @@ -40,4 +39,3 @@ static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock) spin_unlock(lock); return 0; } -EXPORT_SYMBOL(_atomic_dec_and_lock); diff --git a/arch/alpha/lib/divide.S b/arch/alpha/lib/divide.S index 1e33bd127621..2d1a0484a99e 100644 --- a/arch/alpha/lib/divide.S +++ b/arch/alpha/lib/divide.S @@ -45,7 +45,6 @@ * $28 - compare status */ -#include #define halt .long 0 /* @@ -152,7 +151,6 @@ ufunction: addq $30,STACK,$30 ret $31,($23),1 .end ufunction -EXPORT_SYMBOL(ufunction) /* * Uhh.. Ugly signed division. I'd rather not have it at all, but @@ -195,4 +193,3 @@ sfunction: addq $30,STACK,$30 ret $31,($23),1 .end sfunction -EXPORT_SYMBOL(sfunction) diff --git a/arch/alpha/lib/ev6-clear_page.S b/arch/alpha/lib/ev6-clear_page.S index abe99e69a194..adf4f7be0e2b 100644 --- a/arch/alpha/lib/ev6-clear_page.S +++ b/arch/alpha/lib/ev6-clear_page.S @@ -3,7 +3,7 @@ * * Zero an entire page. 
*/ -#include + .text .align 4 .global clear_page @@ -52,4 +52,3 @@ clear_page: nop .end clear_page - EXPORT_SYMBOL(clear_page) diff --git a/arch/alpha/lib/ev6-clear_user.S b/arch/alpha/lib/ev6-clear_user.S index e179e4757ef8..4f42a16b7f53 100644 --- a/arch/alpha/lib/ev6-clear_user.S +++ b/arch/alpha/lib/ev6-clear_user.S @@ -9,6 +9,21 @@ * a successful copy). There is also some rather minor exception setup * stuff. * + * NOTE! This is not directly C-callable, because the calling semantics + * are different: + * + * Inputs: + * length in $0 + * destination address in $6 + * exception pointer in $7 + * return address in $28 (exceptions expect it there) + * + * Outputs: + * bytes left to copy in $0 + * + * Clobbers: + * $1,$2,$3,$4,$5,$6 + * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here @@ -28,7 +43,6 @@ * want to leave a hole (and we also want to avoid repeating lots of work) */ -#include /* Allow an exception for an insn; exit if we get one. */ #define EX(x,y...) \ 99: x,##y; \ @@ -41,15 +55,14 @@ .set noreorder .align 4 - .globl __clear_user - .ent __clear_user - .frame $30, 0, $26 + .globl __do_clear_user + .ent __do_clear_user + .frame $30, 0, $28 .prologue 0 # Pipeline info : Slotting & Comments -__clear_user: - and $17, $17, $0 - and $16, 7, $4 # .. E .. .. : find dest head misalignment +__do_clear_user: + and $6, 7, $4 # .. E .. .. : find dest head misalignment beq $0, $zerolength # U .. .. .. : U L U L addq $0, $4, $1 # .. .. .. E : bias counter @@ -61,14 +74,14 @@ __clear_user: /* * Head is not aligned. Write (8 - $4) bytes to head of destination - * This means $16 is known to be misaligned + * This means $6 is known to be misaligned */ - EX( ldq_u $5, 0($16) ) # .. .. .. L : load dst word to mask back in + EX( ldq_u $5, 0($6) ) # .. .. .. L : load dst word to mask back in beq $1, $onebyte # .. .. U .. : sub-word store? - mskql $5, $16, $5 # .. 
U .. .. : take care of misaligned head - addq $16, 8, $16 # E .. .. .. : L U U L + mskql $5, $6, $5 # .. U .. .. : take care of misaligned head + addq $6, 8, $6 # E .. .. .. : L U U L - EX( stq_u $5, -8($16) ) # .. .. .. L : + EX( stq_u $5, -8($6) ) # .. .. .. L : subq $1, 1, $1 # .. .. E .. : addq $0, $4, $0 # .. E .. .. : bytes left -= 8 - misalignment subq $0, 8, $0 # E .. .. .. : U L U L @@ -79,11 +92,11 @@ __clear_user: * values upon initial entry to the loop * $1 is number of quadwords to clear (zero is a valid value) * $2 is number of trailing bytes (0..7) ($2 never used...) - * $16 is known to be aligned 0mod8 + * $6 is known to be aligned 0mod8 */ $headalign: subq $1, 16, $4 # .. .. .. E : If < 16, we can not use the huge loop - and $16, 0x3f, $2 # .. .. E .. : Forward work for huge loop + and $6, 0x3f, $2 # .. .. E .. : Forward work for huge loop subq $2, 0x40, $3 # .. E .. .. : bias counter (huge loop) blt $4, $trailquad # U .. .. .. : U L U L @@ -100,21 +113,21 @@ $headalign: beq $3, $bigalign # U .. .. .. : U L U L : Aligned 0mod64 $alignmod64: - EX( stq_u $31, 0($16) ) # .. .. .. L + EX( stq_u $31, 0($6) ) # .. .. .. L addq $3, 8, $3 # .. .. E .. subq $0, 8, $0 # .. E .. .. nop # E .. .. .. : U L U L nop # .. .. .. E subq $1, 1, $1 # .. .. E .. - addq $16, 8, $16 # .. E .. .. + addq $6, 8, $6 # .. E .. .. blt $3, $alignmod64 # U .. .. .. : U L U L $bigalign: /* * $0 is the number of bytes left * $1 is the number of quads left - * $16 is aligned 0mod64 + * $6 is aligned 0mod64 * we know that we'll be taking a minimum of one trip through * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle * We are _not_ going to update $0 after every single store. That @@ -131,39 +144,39 @@ $bigalign: nop # E : nop # E : nop # E : - bis $16,$16,$3 # E : U L U L : Initial wh64 address is dest + bis $6,$6,$3 # E : U L U L : Initial wh64 address is dest /* This might actually help for the current trip... */ $do_wh64: wh64 ($3) # .. .. .. 
L1 : memory subsystem hint subq $1, 16, $4 # .. .. E .. : Forward calculation - repeat the loop? - EX( stq_u $31, 0($16) ) # .. L .. .. + EX( stq_u $31, 0($6) ) # .. L .. .. subq $0, 8, $0 # E .. .. .. : U L U L - addq $16, 128, $3 # E : Target address of wh64 - EX( stq_u $31, 8($16) ) # L : - EX( stq_u $31, 16($16) ) # L : + addq $6, 128, $3 # E : Target address of wh64 + EX( stq_u $31, 8($6) ) # L : + EX( stq_u $31, 16($6) ) # L : subq $0, 16, $0 # E : U L L U nop # E : - EX( stq_u $31, 24($16) ) # L : - EX( stq_u $31, 32($16) ) # L : + EX( stq_u $31, 24($6) ) # L : + EX( stq_u $31, 32($6) ) # L : subq $0, 168, $5 # E : U L L U : two trips through the loop left? /* 168 = 192 - 24, since we've already completed some stores */ subq $0, 16, $0 # E : - EX( stq_u $31, 40($16) ) # L : - EX( stq_u $31, 48($16) ) # L : - cmovlt $5, $16, $3 # E : U L L U : Latency 2, extra mapping cycle + EX( stq_u $31, 40($6) ) # L : + EX( stq_u $31, 48($6) ) # L : + cmovlt $5, $6, $3 # E : U L L U : Latency 2, extra mapping cycle subq $1, 8, $1 # E : subq $0, 16, $0 # E : - EX( stq_u $31, 56($16) ) # L : + EX( stq_u $31, 56($6) ) # L : nop # E : U L U L nop # E : subq $0, 8, $0 # E : - addq $16, 64, $16 # E : + addq $6, 64, $6 # E : bge $4, $do_wh64 # U : U L U L $trailquad: @@ -176,14 +189,14 @@ $trailquad: beq $1, $trailbytes # U .. .. .. : U L U L : Only 0..7 bytes to go $onequad: - EX( stq_u $31, 0($16) ) # .. .. .. L + EX( stq_u $31, 0($6) ) # .. .. .. L subq $1, 1, $1 # .. .. E .. subq $0, 8, $0 # .. E .. .. nop # E .. .. .. : U L U L nop # .. .. .. E nop # .. .. E .. - addq $16, 8, $16 # .. E .. .. + addq $6, 8, $6 # .. E .. .. bgt $1, $onequad # U .. .. .. : U L U L # We have an unknown number of bytes left to go. @@ -197,9 +210,9 @@ $trailbytes: # so we will use $0 as the loop counter # We know for a fact that $0 > 0 zero due to previous context $onebyte: - EX( stb $31, 0($16) ) # .. .. .. L + EX( stb $31, 0($6) ) # .. .. .. L subq $0, 1, $0 # .. .. E .. 
: - addq $16, 1, $16 # .. E .. .. : + addq $6, 1, $6 # .. E .. .. : bgt $0, $onebyte # U .. .. .. : U L U L $zerolength: @@ -207,6 +220,6 @@ $exception: # Destination for exception recovery(?) nop # .. .. .. E : nop # .. .. E .. : nop # .. E .. .. : - ret $31, ($26), 1 # L0 .. .. .. : L U L U - .end __clear_user - EXPORT_SYMBOL(__clear_user) + ret $31, ($28), 1 # L0 .. .. .. : L U L U + .end __do_clear_user + diff --git a/arch/alpha/lib/ev6-copy_page.S b/arch/alpha/lib/ev6-copy_page.S index 77935061bddb..b789db192754 100644 --- a/arch/alpha/lib/ev6-copy_page.S +++ b/arch/alpha/lib/ev6-copy_page.S @@ -56,7 +56,7 @@ destination pages are in the dcache, but it is my guess that this is less important than the dcache miss case. */ -#include + .text .align 4 .global copy_page @@ -201,4 +201,3 @@ copy_page: nop .end copy_page - EXPORT_SYMBOL(copy_page) diff --git a/arch/alpha/lib/ev6-copy_user.S b/arch/alpha/lib/ev6-copy_user.S index 35e6710d0700..db42ffe9c350 100644 --- a/arch/alpha/lib/ev6-copy_user.S +++ b/arch/alpha/lib/ev6-copy_user.S @@ -12,6 +12,21 @@ * only _after_ a successful copy). There is also some rather minor * exception setup stuff.. * + * NOTE! This is not directly C-callable, because the calling semantics are + * different: + * + * Inputs: + * length in $0 + * destination address in $6 + * source address in $7 + * return address in $28 + * + * Outputs: + * bytes left to copy in $0 + * + * Clobbers: + * $1,$2,$3,$4,$5,$6,$7 + * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here @@ -22,7 +37,6 @@ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 */ -#include /* Allow an exception for an insn; exit if we get one. */ #define EXI(x,y...) \ 99: x,##y; \ @@ -45,11 +59,10 @@ # Pipeline info: Slotting & Comments __copy_user: .prologue 0 - andq $18, $18, $0 - subq $18, 32, $1 # .. E .. .. : Is this going to be a small copy? 
+ subq $0, 32, $1 # .. E .. .. : Is this going to be a small copy? beq $0, $zerolength # U .. .. .. : U L U L - and $16,7,$3 # .. .. .. E : is leading dest misalignment + and $6,7,$3 # .. .. .. E : is leading dest misalignment ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data beq $3, $destaligned # .. U .. .. : 2nd (one cycle fetcher stall) subq $3, 8, $3 # E .. .. .. : L U U L : trip counter @@ -59,17 +72,17 @@ __copy_user: * We know we have at least one trip through this loop */ $aligndest: - EXI( ldbu $1,0($17) ) # .. .. .. L : Keep loads separate from stores - addq $16,1,$16 # .. .. E .. : Section 3.8 in the CWG + EXI( ldbu $1,0($7) ) # .. .. .. L : Keep loads separate from stores + addq $6,1,$6 # .. .. E .. : Section 3.8 in the CWG addq $3,1,$3 # .. E .. .. : nop # E .. .. .. : U L U L /* - * the -1 is to compensate for the inc($16) done in a previous quadpack + * the -1 is to compensate for the inc($6) done in a previous quadpack * which allows us zero dependencies within either quadpack in the loop */ - EXO( stb $1,-1($16) ) # .. .. .. L : - addq $17,1,$17 # .. .. E .. : Section 3.8 in the CWG + EXO( stb $1,-1($6) ) # .. .. .. L : + addq $7,1,$7 # .. .. E .. : Section 3.8 in the CWG subq $0,1,$0 # .. E .. .. : bne $3, $aligndest # U .. .. .. : U L U L @@ -78,29 +91,29 @@ $aligndest: * If we arrived via branch, we have a minimum of 32 bytes */ $destaligned: - and $17,7,$1 # .. .. .. E : Check _current_ source alignment + and $7,7,$1 # .. .. .. E : Check _current_ source alignment bic $0,7,$4 # .. .. E .. : number bytes as a quadword loop - EXI( ldq_u $3,0($17) ) # .. L .. .. : Forward fetch for fallthrough code + EXI( ldq_u $3,0($7) ) # .. L .. .. : Forward fetch for fallthrough code beq $1,$quadaligned # U .. .. .. 
: U L U L /* - * In the worst case, we've just executed an ldq_u here from 0($17) + * In the worst case, we've just executed an ldq_u here from 0($7) * and we'll repeat it once if we take the branch */ /* Misaligned quadword loop - not unrolled. Leave it that way. */ $misquad: - EXI( ldq_u $2,8($17) ) # .. .. .. L : + EXI( ldq_u $2,8($7) ) # .. .. .. L : subq $4,8,$4 # .. .. E .. : - extql $3,$17,$3 # .. U .. .. : - extqh $2,$17,$1 # U .. .. .. : U U L L + extql $3,$7,$3 # .. U .. .. : + extqh $2,$7,$1 # U .. .. .. : U U L L bis $3,$1,$1 # .. .. .. E : - EXO( stq $1,0($16) ) # .. .. L .. : - addq $17,8,$17 # .. E .. .. : + EXO( stq $1,0($6) ) # .. .. L .. : + addq $7,8,$7 # .. E .. .. : subq $0,8,$0 # E .. .. .. : U L L U - addq $16,8,$16 # .. .. .. E : + addq $6,8,$6 # .. .. .. E : bis $2,$2,$3 # .. .. E .. : nop # .. E .. .. : bne $4,$misquad # U .. .. .. : U L U L @@ -111,8 +124,8 @@ $misquad: beq $0,$zerolength # U .. .. .. : U L U L /* We know we have at least one trip through the byte loop */ - EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad - addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG) + EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad + addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG) nop # .. E .. .. : br $31, $dirtyentry # L0 .. .. .. : L U U L /* Do the trailing byte loop load, then hop into the store part of the loop */ @@ -122,8 +135,8 @@ $misquad: * Based upon the usage context, it's worth the effort to unroll this loop * $0 - number of bytes to be moved * $4 - number of bytes to move as quadwords - * $16 is current destination address - * $17 is current source address + * $6 is current destination address + * $7 is current source address */ $quadaligned: subq $4, 32, $2 # .. .. .. E : do not unroll for small stuff @@ -141,29 +154,29 @@ $quadaligned: * instruction memory hint instruction). */ $unroll4: - EXI( ldq $1,0($17) ) # .. .. .. L - EXI( ldq $2,8($17) ) # .. .. L .. 
+ EXI( ldq $1,0($7) ) # .. .. .. L + EXI( ldq $2,8($7) ) # .. .. L .. subq $4,32,$4 # .. E .. .. nop # E .. .. .. : U U L L - addq $17,16,$17 # .. .. .. E - EXO( stq $1,0($16) ) # .. .. L .. - EXO( stq $2,8($16) ) # .. L .. .. + addq $7,16,$7 # .. .. .. E + EXO( stq $1,0($6) ) # .. .. L .. + EXO( stq $2,8($6) ) # .. L .. .. subq $0,16,$0 # E .. .. .. : U L L U - addq $16,16,$16 # .. .. .. E - EXI( ldq $1,0($17) ) # .. .. L .. - EXI( ldq $2,8($17) ) # .. L .. .. + addq $6,16,$6 # .. .. .. E + EXI( ldq $1,0($7) ) # .. .. L .. + EXI( ldq $2,8($7) ) # .. L .. .. subq $4, 32, $3 # E .. .. .. : U U L L : is there enough for another trip? - EXO( stq $1,0($16) ) # .. .. .. L - EXO( stq $2,8($16) ) # .. .. L .. + EXO( stq $1,0($6) ) # .. .. .. L + EXO( stq $2,8($6) ) # .. .. L .. subq $0,16,$0 # .. E .. .. - addq $17,16,$17 # E .. .. .. : U L L U + addq $7,16,$7 # E .. .. .. : U L L U nop # .. .. .. E nop # .. .. E .. - addq $16,16,$16 # .. E .. .. + addq $6,16,$6 # .. E .. .. bgt $3,$unroll4 # U .. .. .. : U L U L nop @@ -172,14 +185,14 @@ $unroll4: beq $4, $noquads $onequad: - EXI( ldq $1,0($17) ) + EXI( ldq $1,0($7) ) subq $4,8,$4 - addq $17,8,$17 + addq $7,8,$7 nop - EXO( stq $1,0($16) ) + EXO( stq $1,0($6) ) subq $0,8,$0 - addq $16,8,$16 + addq $6,8,$6 bne $4,$onequad $noquads: @@ -193,33 +206,54 @@ $noquads: * There's no point in doing a lot of complex alignment calculations to try to * to quadword stuff for a small amount of data. * $0 - remaining number of bytes left to copy - * $16 - current dest addr - * $17 - current source addr + * $6 - current dest addr + * $7 - current source addr */ $onebyteloop: - EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad - addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG) + EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad + addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG) nop # .. E .. .. : nop # E .. .. .. 
: U L U L $dirtyentry: /* - * the -1 is to compensate for the inc($16) done in a previous quadpack + * the -1 is to compensate for the inc($6) done in a previous quadpack * which allows us zero dependencies within either quadpack in the loop */ - EXO ( stb $2,-1($16) ) # .. .. .. L : - addq $17,1,$17 # .. .. E .. : quadpack as the load + EXO ( stb $2,-1($6) ) # .. .. .. L : + addq $7,1,$7 # .. .. E .. : quadpack as the load subq $0,1,$0 # .. E .. .. : change count _after_ copy bgt $0,$onebyteloop # U .. .. .. : U L U L $zerolength: -$exitin: $exitout: # Destination for exception recovery(?) nop # .. .. .. E nop # .. .. E .. nop # .. E .. .. - ret $31,($26),1 # L0 .. .. .. : L U L U + ret $31,($28),1 # L0 .. .. .. : L U L U + +$exitin: + + /* A stupid byte-by-byte zeroing of the rest of the output + buffer. This cures security holes by never leaving + random kernel data around to be copied elsewhere. */ + + nop + nop + nop + mov $0,$1 + +$101: + EXO ( stb $31,0($6) ) # L + subq $1,1,$1 # E + addq $6,1,$6 # E + bgt $1,$101 # U + + nop + nop + nop + ret $31,($28),1 # L0 .end __copy_user - EXPORT_SYMBOL(__copy_user) + diff --git a/arch/alpha/lib/ev6-csum_ipv6_magic.S b/arch/alpha/lib/ev6-csum_ipv6_magic.S index de62627ac4fe..fc0bc399f872 100644 --- a/arch/alpha/lib/ev6-csum_ipv6_magic.S +++ b/arch/alpha/lib/ev6-csum_ipv6_magic.S @@ -52,7 +52,6 @@ * may cause additional delay in rare cases (load-load replay traps). */ -#include .globl csum_ipv6_magic .align 4 .ent csum_ipv6_magic @@ -149,4 +148,3 @@ csum_ipv6_magic: ret # L0 : L U L U .end csum_ipv6_magic - EXPORT_SYMBOL(csum_ipv6_magic) diff --git a/arch/alpha/lib/ev6-divide.S b/arch/alpha/lib/ev6-divide.S index d18dc0e96e3d..2a82b9be93fa 100644 --- a/arch/alpha/lib/ev6-divide.S +++ b/arch/alpha/lib/ev6-divide.S @@ -55,7 +55,6 @@ * Try not to change the actual algorithm if possible for consistency. 
*/ -#include #define halt .long 0 /* @@ -206,7 +205,6 @@ ufunction: addq $30,STACK,$30 # E : ret $31,($23),1 # L0 : L U U L .end ufunction -EXPORT_SYMBOL(ufunction) /* * Uhh.. Ugly signed division. I'd rather not have it at all, but @@ -259,4 +257,3 @@ sfunction: addq $30,STACK,$30 # E : ret $31,($23),1 # L0 : L U U L .end sfunction -EXPORT_SYMBOL(sfunction) diff --git a/arch/alpha/lib/ev6-memchr.S b/arch/alpha/lib/ev6-memchr.S index 419adc53ccb4..1a5f71b9d8b1 100644 --- a/arch/alpha/lib/ev6-memchr.S +++ b/arch/alpha/lib/ev6-memchr.S @@ -27,7 +27,7 @@ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. */ -#include + .set noreorder .set noat @@ -189,4 +189,3 @@ $not_found: ret # L0 : .end memchr - EXPORT_SYMBOL(memchr) diff --git a/arch/alpha/lib/ev6-memcpy.S b/arch/alpha/lib/ev6-memcpy.S index b19798b2efc0..52b37b0f2af5 100644 --- a/arch/alpha/lib/ev6-memcpy.S +++ b/arch/alpha/lib/ev6-memcpy.S @@ -19,7 +19,7 @@ * Temp usage notes: * $1,$2, - scratch */ -#include + .set noreorder .set noat @@ -242,7 +242,6 @@ $nomoredata: nop # E : .end memcpy - EXPORT_SYMBOL(memcpy) /* For backwards module compatibility. */ __memcpy = memcpy diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S index fed21c6893e8..356bb2fdd705 100644 --- a/arch/alpha/lib/ev6-memset.S +++ b/arch/alpha/lib/ev6-memset.S @@ -26,7 +26,7 @@ * as fixes will need to be made in multiple places. The performance gain * is worth it. 
*/ -#include + .set noat .set noreorder .text @@ -229,7 +229,6 @@ end_b: nop ret $31,($26),1 # L0 : .end ___memset - EXPORT_SYMBOL(___memset) /* * This is the original body of code, prior to replication and @@ -407,7 +406,6 @@ end: nop ret $31,($26),1 # L0 : .end __constant_c_memset - EXPORT_SYMBOL(__constant_c_memset) /* * This is a replicant of the __constant_c_memset code, rescheduled @@ -596,9 +594,6 @@ end_w: ret $31,($26),1 # L0 : .end __memsetw - EXPORT_SYMBOL(__memsetw) memset = ___memset __memset = ___memset - EXPORT_SYMBOL(memset) - EXPORT_SYMBOL(__memset) diff --git a/arch/alpha/lib/ev67-strcat.S b/arch/alpha/lib/ev67-strcat.S index b69f60419be1..c426fe3ed72f 100644 --- a/arch/alpha/lib/ev67-strcat.S +++ b/arch/alpha/lib/ev67-strcat.S @@ -19,7 +19,7 @@ * string once. */ -#include + .text .align 4 @@ -52,4 +52,3 @@ $found: cttz $2, $3 # U0 : br __stxcpy # L0 : .end strcat - EXPORT_SYMBOL(strcat) diff --git a/arch/alpha/lib/ev67-strchr.S b/arch/alpha/lib/ev67-strchr.S index ea8f2f35db9c..fbb7b4ffade9 100644 --- a/arch/alpha/lib/ev67-strchr.S +++ b/arch/alpha/lib/ev67-strchr.S @@ -15,7 +15,7 @@ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 * Try not to change the actual algorithm if possible for consistency. 
*/ -#include + #include .set noreorder @@ -86,4 +86,3 @@ $found: negq t0, t1 # E : clear all but least set bit ret # L0 : .end strchr - EXPORT_SYMBOL(strchr) diff --git a/arch/alpha/lib/ev67-strlen.S b/arch/alpha/lib/ev67-strlen.S index 736fd41884a8..503928072523 100644 --- a/arch/alpha/lib/ev67-strlen.S +++ b/arch/alpha/lib/ev67-strlen.S @@ -17,7 +17,7 @@ * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 */ -#include + .set noreorder .set noat @@ -47,4 +47,3 @@ $found: ret $31, ($26) # L0 : .end strlen - EXPORT_SYMBOL(strlen) diff --git a/arch/alpha/lib/ev67-strncat.S b/arch/alpha/lib/ev67-strncat.S index cd35cbade73a..4ae716cd2bfb 100644 --- a/arch/alpha/lib/ev67-strncat.S +++ b/arch/alpha/lib/ev67-strncat.S @@ -20,7 +20,7 @@ * Try not to change the actual algorithm if possible for consistency. */ -#include + .text .align 4 @@ -92,4 +92,3 @@ $zerocount: ret # L0 : .end strncat - EXPORT_SYMBOL(strncat) diff --git a/arch/alpha/lib/ev67-strrchr.S b/arch/alpha/lib/ev67-strrchr.S index 747455f0328c..dd0d8c6b9f59 100644 --- a/arch/alpha/lib/ev67-strrchr.S +++ b/arch/alpha/lib/ev67-strrchr.S @@ -18,7 +18,7 @@ * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 */ -#include + #include .set noreorder @@ -107,4 +107,3 @@ $eos: nop .end strrchr - EXPORT_SYMBOL(strrchr) diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c index 4aa6dbfa14ee..05017ba34c3c 100644 --- a/arch/alpha/lib/fpreg.c +++ b/arch/alpha/lib/fpreg.c @@ -4,9 +4,6 @@ * (C) Copyright 1998 Linus Torvalds */ -#include -#include - #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val)); #else @@ -55,7 +52,6 @@ alpha_read_fp_reg (unsigned long reg) } return val; } -EXPORT_SYMBOL(alpha_read_fp_reg); #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val)); @@ -101,7 +97,6 @@ 
alpha_write_fp_reg (unsigned long reg, unsigned long val) case 31: LDT(31, val); break; } } -EXPORT_SYMBOL(alpha_write_fp_reg); #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val)); @@ -151,7 +146,6 @@ alpha_read_fp_reg_s (unsigned long reg) } return val; } -EXPORT_SYMBOL(alpha_read_fp_reg_s); #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val)); @@ -197,4 +191,3 @@ alpha_write_fp_reg_s (unsigned long reg, unsigned long val) case 31: LDS(31, val); break; } } -EXPORT_SYMBOL(alpha_write_fp_reg_s); diff --git a/arch/alpha/lib/memchr.S b/arch/alpha/lib/memchr.S index c13d3eca2e05..14427eeb555e 100644 --- a/arch/alpha/lib/memchr.S +++ b/arch/alpha/lib/memchr.S @@ -31,7 +31,7 @@ For correctness consider that: - only minimum number of quadwords may be accessed - the third argument is an unsigned long */ -#include + .set noreorder .set noat @@ -162,4 +162,3 @@ $not_found: ret # .. e1 : .end memchr - EXPORT_SYMBOL(memchr) diff --git a/arch/alpha/lib/memcpy.c b/arch/alpha/lib/memcpy.c index 57d9291ad172..64083fc73238 100644 --- a/arch/alpha/lib/memcpy.c +++ b/arch/alpha/lib/memcpy.c @@ -16,7 +16,6 @@ */ #include -#include /* * This should be done in one go with ldq_u*2/mask/stq_u. Do it @@ -159,4 +158,6 @@ void * memcpy(void * dest, const void *src, size_t n) __memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n); return dest; } -EXPORT_SYMBOL(memcpy); + +/* For backward modules compatibility, define __memcpy. */ +asm("__memcpy = memcpy; .globl __memcpy"); diff --git a/arch/alpha/lib/memmove.S b/arch/alpha/lib/memmove.S index ff6a39d38385..eb3b6e02242f 100644 --- a/arch/alpha/lib/memmove.S +++ b/arch/alpha/lib/memmove.S @@ -6,7 +6,7 @@ * This is hand-massaged output from the original memcpy.c. We defer to * memcpy whenever possible; the backwards copy loops are not unrolled. 
*/ -#include + .set noat .set noreorder .text @@ -179,4 +179,3 @@ $egress: nop .end memmove - EXPORT_SYMBOL(memmove) diff --git a/arch/alpha/lib/memset.S b/arch/alpha/lib/memset.S index 89a26f5e89de..76ccc6d1f364 100644 --- a/arch/alpha/lib/memset.S +++ b/arch/alpha/lib/memset.S @@ -13,7 +13,7 @@ * The scheduling comments are according to the EV5 documentation (and done by * hand, so they might well be incorrect, please do tell me about it..) */ -#include + .set noat .set noreorder .text @@ -106,8 +106,6 @@ within_one_quad: end: ret $31,($26),1 /* E1 */ .end ___memset -EXPORT_SYMBOL(___memset) -EXPORT_SYMBOL(__constant_c_memset) .align 5 .ent __memsetw @@ -124,9 +122,6 @@ __memsetw: br __constant_c_memset /* .. E1 */ .end __memsetw -EXPORT_SYMBOL(__memsetw) memset = ___memset __memset = ___memset - EXPORT_SYMBOL(memset) - EXPORT_SYMBOL(__memset) diff --git a/arch/alpha/lib/strcat.S b/arch/alpha/lib/strcat.S index 249837b03d4b..393f50384878 100644 --- a/arch/alpha/lib/strcat.S +++ b/arch/alpha/lib/strcat.S @@ -4,7 +4,6 @@ * * Append a null-terminated string from SRC to DST. */ -#include .text @@ -51,4 +50,3 @@ $found: negq $2, $3 # clear all but least set bit br __stxcpy .end strcat -EXPORT_SYMBOL(strcat); diff --git a/arch/alpha/lib/strchr.S b/arch/alpha/lib/strchr.S index 7412a173ea39..011a175e8329 100644 --- a/arch/alpha/lib/strchr.S +++ b/arch/alpha/lib/strchr.S @@ -5,7 +5,7 @@ * Return the address of a given character within a null-terminated * string, or null if it is not found. */ -#include + #include .set noreorder @@ -68,4 +68,3 @@ $retnull: ret # .. e1 : .end strchr - EXPORT_SYMBOL(strchr) diff --git a/arch/alpha/lib/strcpy.S b/arch/alpha/lib/strcpy.S index 98deae1e4d08..e0728e4ad21f 100644 --- a/arch/alpha/lib/strcpy.S +++ b/arch/alpha/lib/strcpy.S @@ -5,7 +5,7 @@ * Copy a null-terminated string from SRC to DST. Return a pointer * to the null-terminator in the source. 
*/ -#include + .text .align 3 @@ -21,4 +21,3 @@ strcpy: br __stxcpy # do the copy .end strcpy - EXPORT_SYMBOL(strcpy) diff --git a/arch/alpha/lib/strlen.S b/arch/alpha/lib/strlen.S index 79c416f71bac..fe63353de152 100644 --- a/arch/alpha/lib/strlen.S +++ b/arch/alpha/lib/strlen.S @@ -11,7 +11,7 @@ * do this instead of the 9 instructions that * binary search needs). */ -#include + .set noreorder .set noat @@ -55,4 +55,3 @@ done: subq $0, $16, $0 ret $31, ($26) .end strlen - EXPORT_SYMBOL(strlen) diff --git a/arch/alpha/lib/strncat.S b/arch/alpha/lib/strncat.S index 6c29ea60869a..a8278163c972 100644 --- a/arch/alpha/lib/strncat.S +++ b/arch/alpha/lib/strncat.S @@ -9,7 +9,7 @@ * past count, whereas libc may write to count+1. This follows the generic * implementation in lib/string.c and is, IMHO, more sensible. */ -#include + .text .align 3 @@ -82,4 +82,3 @@ $zerocount: ret .end strncat - EXPORT_SYMBOL(strncat) diff --git a/arch/alpha/lib/strncpy.S b/arch/alpha/lib/strncpy.S index e102cf1567dd..a46f7f3ad8c7 100644 --- a/arch/alpha/lib/strncpy.S +++ b/arch/alpha/lib/strncpy.S @@ -10,7 +10,7 @@ * version has cropped that bit o' nastiness as well as assuming that * __stxncpy is in range of a branch. */ -#include + .set noat .set noreorder @@ -79,4 +79,3 @@ $zerolen: ret .end strncpy - EXPORT_SYMBOL(strncpy) diff --git a/arch/alpha/lib/strrchr.S b/arch/alpha/lib/strrchr.S index 4bc6cb4b9812..1970dc07cfd1 100644 --- a/arch/alpha/lib/strrchr.S +++ b/arch/alpha/lib/strrchr.S @@ -5,7 +5,7 @@ * Return the address of the last occurrence of a given character * within a null-terminated string, or null if it is not found. */ -#include + #include .set noreorder @@ -85,4 +85,3 @@ $retnull: ret # .. 
e1 : .end strrchr - EXPORT_SYMBOL(strrchr) diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 77f6b1c73737..a5d8bef65911 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -23,7 +23,7 @@ config ARC select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_KGDB select HAVE_ARCH_TRACEHOOK - select HAVE_FUTEX_CMPXCHG if FUTEX + select HAVE_FUTEX_CMPXCHG select HAVE_IOREMAP_PROT select HAVE_KPROBES select HAVE_KRETPROBES diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 150656503c11..8f8d53f08141 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -108,7 +108,6 @@ bootpImage: vmlinux boot_targets += uImage uImage.bin uImage.gz -PHONY += $(boot_targets) $(boot_targets): vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index fd2c88ef2e2b..8f1145ed0046 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -17,7 +17,6 @@ #define free_user_page(page, addr) free_page(addr) #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) struct vm_area_struct; diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index 6da48c1e3475..db1eee5fe502 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -181,7 +181,7 @@ tracesys: ; Do the Sys Call as we normally would. 
; Validate the Sys Call number - cmp r8, NR_syscalls - 1 + cmp r8, NR_syscalls mov.hi r0, -ENOSYS bhi tracesys_exit @@ -264,7 +264,7 @@ ENTRY(EV_Trap) ;============ Normal syscall case ; syscall num shd not exceed the total system calls avail - cmp r8, NR_syscalls - 1 + cmp r8, NR_syscalls mov.hi r0, -ENOSYS bhi ret_from_system_call diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 639f39f39917..257b8699efde 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -97,7 +97,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs, sizeof(sf->uc.uc_mcontext.regs.scratch)); err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); - return err ? -EFAULT : 0; + return err; } static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) @@ -111,7 +111,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) &(sf->uc.uc_mcontext.regs.scratch), sizeof(sf->uc.uc_mcontext.regs.scratch)); if (err) - return -EFAULT; + return err; set_current_blocked(&set); regs->bta = uregs.scratch.bta; diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index 054511f571df..5401e2bab3da 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c @@ -39,15 +39,15 @@ #ifdef CONFIG_ARC_DW2_UNWIND -static int -seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs, - struct unwind_frame_info *frame_info) +static void seed_unwind_frame_info(struct task_struct *tsk, + struct pt_regs *regs, + struct unwind_frame_info *frame_info) { /* * synchronous unwinding (e.g. 
dump_stack) * - uses current values of SP and friends */ - if (regs == NULL && (tsk == NULL || tsk == current)) { + if (tsk == NULL && regs == NULL) { unsigned long fp, sp, blink, ret; frame_info->task = current; @@ -66,15 +66,11 @@ seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs, frame_info->call_frame = 0; } else if (regs == NULL) { /* - * Asynchronous unwinding of a likely sleeping task - * - first ensure it is actually sleeping - * - if so, it will be in __switch_to, kernel mode SP of task - * is safe-kept and BLINK at a well known location in there + * Asynchronous unwinding of sleeping task + * - Gets SP etc from task's pt_regs (saved bottom of kernel + * mode stack of task) */ - if (tsk->state == TASK_RUNNING) - return -1; - frame_info->task = tsk; frame_info->regs.r27 = TSK_K_FP(tsk); @@ -108,8 +104,6 @@ seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs, frame_info->regs.r63 = regs->ret; frame_info->call_frame = 0; } - - return 0; } #endif @@ -123,8 +117,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, unsigned int address; struct unwind_frame_info frame_info; - if (seed_unwind_frame_info(tsk, regs, &frame_info)) - return 0; + seed_unwind_frame_info(tsk, regs, &frame_info); while (1) { address = UNW_PC(&frame_info); diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index f425405a8a76..017fb440bba4 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -904,7 +904,7 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) clear_page(to); clear_bit(PG_dc_clean, &page->flags); } -EXPORT_SYMBOL(clear_user_page); + /********************************************************************** * Explicit Cache flush request from user space via syscall diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 105d1888e17e..179241a83602 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -55,7 +55,6 @@ config ARM select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL) select 
HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL) select HAVE_FUNCTION_TRACER if (!XIP_KERNEL) - select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_GENERIC_DMA_COHERENT select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)) select HAVE_IDE if PCI || ISA || PCMCIA diff --git a/arch/arm/Makefile b/arch/arm/Makefile index cf90774f3ef3..d0e39999656a 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -13,7 +13,7 @@ # Ensure linker flags are correct LDFLAGS := -LDFLAGS_vmlinux := --no-undefined -X --pic-veneer +LDFLAGS_vmlinux :=-p --no-undefined -X --pic-veneer ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) LDFLAGS_vmlinux += --be8 LDFLAGS_MODULE += --be8 @@ -66,15 +66,15 @@ KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra) # Note that GCC does not numerically define an architecture version # macro, but instead defines a whole series of macros which makes # testing for a specific architecture or later rather impossible. -arch-$(CONFIG_CPU_32v7M) =-D__LINUX_ARM_ARCH__=7 -march=armv7-m -arch-$(CONFIG_CPU_32v7) =-D__LINUX_ARM_ARCH__=7 -march=armv7-a -arch-$(CONFIG_CPU_32v6) =-D__LINUX_ARM_ARCH__=6 -march=armv6 +arch-$(CONFIG_CPU_32v7M) =-D__LINUX_ARM_ARCH__=7 -march=armv7-m -Wa,-march=armv7-m +arch-$(CONFIG_CPU_32v7) =-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a) +arch-$(CONFIG_CPU_32v6) =-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6) # Only override the compiler option if ARMv6. 
The ARMv6K extensions are # always available in ARMv7 ifeq ($(CONFIG_CPU_32v6),y) -arch-$(CONFIG_CPU_32v6K) =-D__LINUX_ARM_ARCH__=6 -march=armv6k +arch-$(CONFIG_CPU_32v6K) =-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k) endif -arch-$(CONFIG_CPU_32v5) =-D__LINUX_ARM_ARCH__=5 -march=armv5te +arch-$(CONFIG_CPU_32v5) =-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t) arch-$(CONFIG_CPU_32v4T) =-D__LINUX_ARM_ARCH__=4 -march=armv4t arch-$(CONFIG_CPU_32v4) =-D__LINUX_ARM_ARCH__=4 -march=armv4 arch-$(CONFIG_CPU_32v3) =-D__LINUX_ARM_ARCH__=3 -march=armv3 @@ -88,7 +88,7 @@ tune-$(CONFIG_CPU_ARM720T) =-mtune=arm7tdmi tune-$(CONFIG_CPU_ARM740T) =-mtune=arm7tdmi tune-$(CONFIG_CPU_ARM9TDMI) =-mtune=arm9tdmi tune-$(CONFIG_CPU_ARM940T) =-mtune=arm9tdmi -tune-$(CONFIG_CPU_ARM946E) =-mtune=arm9e +tune-$(CONFIG_CPU_ARM946E) =$(call cc-option,-mtune=arm9e,-mtune=arm9tdmi) tune-$(CONFIG_CPU_ARM920T) =-mtune=arm9tdmi tune-$(CONFIG_CPU_ARM922T) =-mtune=arm9tdmi tune-$(CONFIG_CPU_ARM925T) =-mtune=arm9tdmi @@ -96,11 +96,11 @@ tune-$(CONFIG_CPU_ARM926T) =-mtune=arm9tdmi tune-$(CONFIG_CPU_FA526) =-mtune=arm9tdmi tune-$(CONFIG_CPU_SA110) =-mtune=strongarm110 tune-$(CONFIG_CPU_SA1100) =-mtune=strongarm1100 -tune-$(CONFIG_CPU_XSCALE) =-mtune=xscale -tune-$(CONFIG_CPU_XSC3) =-mtune=xscale -tune-$(CONFIG_CPU_FEROCEON) =-mtune=xscale -tune-$(CONFIG_CPU_V6) =-mtune=arm1136j-s -tune-$(CONFIG_CPU_V6K) =-mtune=arm1136j-s +tune-$(CONFIG_CPU_XSCALE) =$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale +tune-$(CONFIG_CPU_XSC3) =$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale +tune-$(CONFIG_CPU_FEROCEON) =$(call cc-option,-mtune=marvell-f,-mtune=xscale) +tune-$(CONFIG_CPU_V6) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) +tune-$(CONFIG_CPU_V6K) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) # Evaluate tune cc-option calls now tune-y := $(tune-y) diff --git a/arch/arm/boot/bootp/Makefile 
b/arch/arm/boot/bootp/Makefile index 9ee49d50842f..5761f0039133 100644 --- a/arch/arm/boot/bootp/Makefile +++ b/arch/arm/boot/bootp/Makefile @@ -7,7 +7,7 @@ GCOV_PROFILE := n -LDFLAGS_bootp := --no-undefined -X \ +LDFLAGS_bootp :=-p --no-undefined -X \ --defsym initrd_phys=$(INITRD_PHYS) \ --defsym params_phys=$(PARAMS_PHYS) -T AFLAGS_initrd.o :=-DINITRD=\"$(INITRD)\" diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index b4631e07b3b6..3f9a9ebc77c3 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -86,8 +86,6 @@ $(addprefix $(obj)/,$(libfdt_objs) atags_to_fdt.o): \ $(addprefix $(obj)/,$(libfdt_hdrs)) ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y) -CFLAGS_REMOVE_atags_to_fdt.o += -Wframe-larger-than=${CONFIG_FRAME_WARN} -CFLAGS_atags_to_fdt.o += -Wframe-larger-than=1280 OBJS += $(libfdt_objs) atags_to_fdt.o endif @@ -122,6 +120,8 @@ endif ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) LDFLAGS_vmlinux += --be8 endif +# ? +LDFLAGS_vmlinux += -p # Report unresolved symbol references LDFLAGS_vmlinux += --no-undefined # Delete all temporary local symbols diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c index b0255cbf3b76..a0765e7ed6c7 100644 --- a/arch/arm/boot/compressed/decompress.c +++ b/arch/arm/boot/compressed/decompress.c @@ -46,10 +46,7 @@ extern char * strstr(const char * s1, const char *s2); #endif #ifdef CONFIG_KERNEL_XZ -/* Prevent KASAN override of string helpers in decompressor */ -#undef memmove #define memmove memmove -#undef memcpy #define memcpy memcpy #include "../../../../lib/decompress_unxz.c" #endif diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 2edb386d3f4b..d2e43b053d9b 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -1076,9 +1076,9 @@ __armv4_mmu_cache_off: __armv7_mmu_cache_off: mrc p15, 0, r0, c1, c0 #ifdef CONFIG_MMU - bic r0, r0, #0x0005 + bic r0, r0, #0x000d #else - bic r0, 
r0, #0x0004 + bic r0, r0, #0x000c #endif mcr p15, 0, r0, c1, c0 @ turn MMU and cache off mov r12, lr diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index e7cd99793bc6..a74b09f17a1a 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ b/arch/arm/boot/dts/am43x-epos-evm.dts @@ -411,7 +411,7 @@ status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins>; - clock-frequency = <100000>; + clock-frequency = <400000>; tps65218: tps65218@24 { reg = <0x24>; diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts index 2b882d129b16..0bd325c314e1 100644 --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts @@ -231,11 +231,6 @@ atmel,pins = ; /* PE9, conflicts with A9 */ }; - pinctrl_usb_default: usb_default { - atmel,pins = - ; - }; }; }; }; @@ -293,8 +288,6 @@ &pioE 3 GPIO_ACTIVE_LOW &pioE 4 GPIO_ACTIVE_LOW >; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usb_default>; status = "okay"; }; diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts index 17adaa1a65c1..e27024cdf48b 100644 --- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts @@ -152,11 +152,6 @@ atmel,pins = ; }; - pinctrl_usb_default: usb_default { - atmel,pins = - ; - }; pinctrl_key_gpio: key_gpio_0 { atmel,pins = ; @@ -182,8 +177,6 @@ &pioE 11 GPIO_ACTIVE_HIGH &pioE 14 GPIO_ACTIVE_HIGH >; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usb_default>; status = "okay"; }; diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index 47d721241408..de8ac998604d 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -175,8 +175,6 @@ gpio-controller; #gpio-cells = <2>; - interrupt-controller; - #interrupt-cells = <2>; }; }; diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi index 
84efc3d16f58..34cd64051250 100644 --- a/arch/arm/boot/dts/bcm63138.dtsi +++ b/arch/arm/boot/dts/bcm63138.dtsi @@ -152,7 +152,7 @@ status = "disabled"; }; - nand_controller: nand-controller@2000 { + nand: nand@2000 { #address-cells = <1>; #size-cells = <0>; compatible = "brcm,nand-bcm63138", "brcm,brcmnand-v7.0", "brcm,brcmnand"; diff --git a/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts b/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts index e51c9b079432..0bb8d17e4c2d 100644 --- a/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts +++ b/arch/arm/boot/dts/bcm7445-bcm97445svmb.dts @@ -13,10 +13,10 @@ }; }; -&nand_controller { +&nand { status = "okay"; - nand@1 { + nandcs@1 { compatible = "brcm,nandcs"; reg = <1>; nand-ecc-step-size = <512>; diff --git a/arch/arm/boot/dts/bcm7445.dtsi b/arch/arm/boot/dts/bcm7445.dtsi index 3f002f2047f1..4791321969b3 100644 --- a/arch/arm/boot/dts/bcm7445.dtsi +++ b/arch/arm/boot/dts/bcm7445.dtsi @@ -149,7 +149,7 @@ reg-names = "aon-ctrl", "aon-sram"; }; - nand_controller: nand-controller@3e2800 { + nand: nand@3e2800 { status = "disabled"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/bcm963138dvt.dts b/arch/arm/boot/dts/bcm963138dvt.dts index 439cff69e948..370aa2cfddf2 100644 --- a/arch/arm/boot/dts/bcm963138dvt.dts +++ b/arch/arm/boot/dts/bcm963138dvt.dts @@ -29,10 +29,10 @@ status = "okay"; }; -&nand_controller { +&nand { status = "okay"; - nand@0 { + nandcs@0 { compatible = "brcm,nandcs"; reg = <0>; nand-ecc-strength = <4>; diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index 97b152e43f9c..0f5dcd418af8 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -134,7 +134,7 @@ compatible = "maxim,max77686"; reg = <0x09>; interrupt-parent = <&gpx3>; - interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + interrupts = <2 IRQ_TYPE_NONE>; pinctrl-names = "default"; pinctrl-0 = <&max77686_irq>; wakeup-source; diff --git 
a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi index b45ad99da8c5..0a7f408824d8 100644 --- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi +++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi @@ -281,7 +281,7 @@ max77686: max77686@09 { compatible = "maxim,max77686"; interrupt-parent = <&gpx3>; - interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + interrupts = <2 IRQ_TYPE_NONE>; pinctrl-names = "default"; pinctrl-0 = <&max77686_irq>; wakeup-source; diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts index 4b3bd43f7721..c1edd6d038a9 100644 --- a/arch/arm/boot/dts/exynos5250-spring.dts +++ b/arch/arm/boot/dts/exynos5250-spring.dts @@ -112,7 +112,7 @@ compatible = "samsung,s5m8767-pmic"; reg = <0x66>; interrupt-parent = <&gpx3>; - interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + interrupts = <2 IRQ_TYPE_NONE>; pinctrl-names = "default"; pinctrl-0 = <&s5m8767_irq &s5m8767_dvs &s5m8767_ds>; wakeup-source; diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts index 5cf9bcc91c4a..b54c0b8a5b34 100644 --- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts +++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts @@ -75,7 +75,7 @@ s2mps11,buck4-ramp-enable = <1>; interrupt-parent = <&gpx3>; - interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + interrupts = <2 IRQ_TYPE_EDGE_FALLING>; pinctrl-names = "default"; pinctrl-0 = <&s2mps11_irq>; diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts b/arch/arm/boot/dts/exynos5422-odroidxu4.dts index b45e2a0c3908..2faf88627a48 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts +++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts @@ -26,7 +26,7 @@ label = "blue:heartbeat"; pwms = <&pwm 2 2000000 0>; pwm-names = "pwm2"; - max-brightness = <255>; + max_brightness = <255>; linux,default-trigger = "heartbeat"; }; }; diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts index acaa3a7c2fc6..57e29977ba06 100644 --- 
a/arch/arm/boot/dts/imx23-evk.dts +++ b/arch/arm/boot/dts/imx23-evk.dts @@ -48,6 +48,7 @@ MX23_PAD_LCD_RESET__GPIO_1_18 MX23_PAD_PWM3__GPIO_1_29 MX23_PAD_PWM4__GPIO_1_30 + MX23_PAD_SSP1_DETECT__SSP1_DETECT >; fsl,drive-strength = ; fsl,voltage = ; diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi index e3e3a7a08d08..cae04e806036 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi @@ -307,8 +307,8 @@ fsl,pins = < MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1 MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1 - MX6QDL_PAD_EIM_D31__UART3_RTS_B 0x1b0b1 - MX6QDL_PAD_EIM_D30__UART3_CTS_B 0x1b0b1 + MX6QDL_PAD_EIM_D30__UART3_RTS_B 0x1b0b1 + MX6QDL_PAD_EIM_D31__UART3_CTS_B 0x1b0b1 >; }; @@ -383,7 +383,6 @@ &uart3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart3>; - uart-has-rtscts; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi index de65c3ec9ba6..061e6a00b255 100644 --- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi +++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi @@ -9,8 +9,6 @@ * */ -#include - / { chosen { stdout-path = &uart2; @@ -109,7 +107,6 @@ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059 MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059 MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059 - MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x1b0b0 >; }; }; @@ -132,6 +129,6 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; + non-removable; status = "okay"; }; diff --git a/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi b/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi index 58d288fddd9c..73e272fadc20 100644 --- a/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi +++ b/arch/arm/boot/dts/omap-gpmc-smsc9221.dtsi @@ -28,7 +28,7 @@ compatible = "smsc,lan9221","smsc,lan9115"; bank-width = <2>; - gpmc,mux-add-data = <0>; + gpmc,mux-add-data; gpmc,cs-on-ns = <0>; gpmc,cs-rd-off-ns = <42>; gpmc,cs-wr-off-ns = <36>; diff --git 
a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi index 3dbeb7a6c569..82e98ee3023a 100644 --- a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi +++ b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi @@ -25,7 +25,7 @@ compatible = "smsc,lan9221","smsc,lan9115"; bank-width = <2>; - gpmc,mux-add-data = <0>; + gpmc,mux-add-data; gpmc,cs-on-ns = <0>; gpmc,cs-rd-off-ns = <42>; gpmc,cs-wr-off-ns = <36>; diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index a2e41d79e829..8a2b25332b8c 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi @@ -22,9 +22,6 @@ i2c0 = &i2c1; i2c1 = &i2c2; i2c2 = &i2c3; - mmc0 = &mmc1; - mmc1 = &mmc2; - mmc2 = &mmc3; serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index 656e35ec037d..8a5628c4b135 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -21,11 +21,6 @@ i2c1 = &i2c2; i2c2 = &i2c3; i2c3 = &i2c4; - mmc0 = &mmc1; - mmc1 = &mmc2; - mmc2 = &mmc3; - mmc3 = &mmc4; - mmc4 = &mmc5; serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index b61ea6ca59b3..4c04389dab32 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -26,11 +26,6 @@ i2c2 = &i2c3; i2c3 = &i2c4; i2c4 = &i2c5; - mmc0 = &mmc1; - mmc1 = &mmc2; - mmc2 = &mmc3; - mmc3 = &mmc4; - mmc4 = &mmc5; serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; diff --git a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi index f22a6b436317..533919e96eae 100644 --- a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi +++ b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi @@ -54,21 +54,18 @@ emac: gem@30000 { compatible = "cadence,gem"; reg = <0x30000 0x10000>; - interrupt-parent = <&vic0>; interrupts = <31>; }; dmac1: dmac@40000 { compatible = "snps,dw-dmac"; reg = <0x40000 0x10000>; - 
interrupt-parent = <&vic0>; interrupts = <25>; }; dmac2: dmac@50000 { compatible = "snps,dw-dmac"; reg = <0x50000 0x10000>; - interrupt-parent = <&vic0>; interrupts = <26>; }; @@ -246,7 +243,6 @@ axi2pico@c0000000 { compatible = "picochip,axi2pico-pc3x2"; reg = <0xc0000000 0x10000>; - interrupt-parent = <&vic0>; interrupts = <13 14 15 16 17 18 19 20 21>; }; }; diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile index ea789f36c2e7..d9fcd4b9e5cb 100644 --- a/arch/arm/boot/dts/qcom/Makefile +++ b/arch/arm/boot/dts/qcom/Makefile @@ -223,7 +223,6 @@ dtb-$(CONFIG_MACH_SONY_POPLAR) += msm8998-yoshino-poplar_generic.dtb \ dtb-$(CONFIG_MACH_SONY_POPLAR_DSDS) += msm8998-yoshino-poplar_dsds.dtb \ msm8998-v2-yoshino-poplar_dsds.dtb \ msm8998-v2.1-yoshino-poplar_dsds.dtb -dtb-$(CONFIG_MACH_SONY_POPLAR_KDDI) += msm8998-v2.1-yoshino-poplar_kddi.dtb dtb-$(CONFIG_MACH_SONY_LILAC) += msm8998-yoshino-lilac_generic.dtb \ msm8998-v2-yoshino-lilac_generic.dtb \ msm8998-v2.1-yoshino-lilac_generic.dtb diff --git a/arch/arm/boot/dts/qcom/dsi-panel-maple.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-maple.dtsi index 2e368de1184b..23e2ebff72f0 100644 --- a/arch/arm/boot/dts/qcom/dsi-panel-maple.dtsi +++ b/arch/arm/boot/dts/qcom/dsi-panel-maple.dtsi @@ -209,6 +209,63 @@ qcom,mdss-dsi-panel-clockrate = <899000000>; }; + + 4k { + qcom,mdss-dsi-panel-width = <1080>; + qcom,mdss-dsi-panel-height = <3840>; + qcom,mdss-dsi-h-back-porch = <104>; + qcom,mdss-dsi-h-pulse-width = <40>; + qcom,mdss-dsi-h-front-porch = <140>; + qcom,mdss-dsi-v-back-porch = <10>; + qcom,mdss-dsi-v-pulse-width = <2>; + qcom,mdss-dsi-v-front-porch = <12>; + qcom,mdss-dsi-panel-framerate = <60>; + qcom,mdss-dsi-panel-timings = [00 1B 06 06 0B 11 05 07 05 03 04 00]; + qcom,mdss-dsi-t-clk-post = <0x07>; + qcom,mdss-dsi-t-clk-pre = <0x29>; + qcom,mdss-dsi-on-command = [ + 15 01 00 00 00 00 02 90 03 + 15 01 00 00 00 00 02 03 01 + 39 01 00 00 00 00 06 F0 55 AA 52 08 04 + 15 01 00 00 00 00 02 C0 03 + 39 01 00 
00 00 00 06 F0 55 AA 52 08 07 + 15 01 00 00 00 00 02 EF 01 + 39 01 00 00 00 00 06 F0 55 AA 52 08 00 + 15 01 00 00 00 00 02 B4 01 + 15 01 00 00 00 00 02 35 00 + 39 01 00 00 00 00 03 44 00 00 + 39 01 00 00 00 00 06 F0 55 AA 52 08 01 + 39 01 00 00 00 00 03 D4 88 88 + 39 01 00 00 00 00 05 FF AA 55 A5 80 + 15 01 00 00 00 00 02 6F 01 + 15 01 00 00 00 00 02 F3 10 + 39 01 00 00 00 00 05 FF AA 55 A5 00]; + qcom,mdss-dsi-post-panel-on-command = [ + 05 01 00 00 00 00 01 29 + 05 01 00 00 43 00 01 11]; + qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; + + qcom,mdss-dsi-timing-switch-command = [ + 39 00 00 00 00 00 06 F0 55 AA 52 08 00 + 15 00 00 00 00 00 02 C9 01 + 15 00 00 00 00 00 02 90 03 + 15 00 00 00 00 00 02 58 00 + 15 00 00 00 00 00 02 03 01 + 39 00 00 00 00 00 06 F0 55 AA 52 08 04 + 15 01 00 00 00 00 02 C0 03 + ]; + qcom,mdss-dsi-timing-switch-command-state = + "dsi_lp_mode"; + + qcom,compression-mode = "dsc"; + qcom,config-select = <&dsi_dual_default_cmd_config1>; + + qcom,mdss-tear-check-sync-init-val = <3840>; + qcom,mdss-tear-check-start-pos = <3840>; + qcom,mdss-tear-check-rd-ptr-trigger-intr = <3841>; + + qcom,mdss-dsi-panel-clockrate = <899000000>; + }; }; }; @@ -301,6 +358,30 @@ qcom,mdss-dsi-panel-clockrate = <899000000>; }; + + 4k { + qcom,mdss-dsi-panel-width = <1080>; + qcom,mdss-dsi-panel-height = <3840>; + qcom,mdss-dsi-h-back-porch = <104>; + qcom,mdss-dsi-h-pulse-width = <40>; + qcom,mdss-dsi-h-front-porch = <140>; + qcom,mdss-dsi-v-back-porch = <10>; + qcom,mdss-dsi-v-pulse-width = <2>; + qcom,mdss-dsi-v-front-porch = <12>; + qcom,mdss-dsi-panel-framerate = <60>; + qcom,mdss-dsi-panel-timings = [00 1B 06 06 0B 11 05 07 05 03 04 00]; + qcom,mdss-dsi-t-clk-post = <0x07>; + qcom,mdss-dsi-t-clk-pre = <0x29>; + + qcom,compression-mode = "dsc"; + qcom,config-select = <&dsi_dual_default_cmd_config1>; + + qcom,mdss-tear-check-sync-init-val = <3840>; + qcom,mdss-tear-check-start-pos = <3840>; + qcom,mdss-tear-check-rd-ptr-trigger-intr = <3841>; + + 
qcom,mdss-dsi-panel-clockrate = <899000000>; + }; }; }; @@ -483,6 +564,64 @@ qcom,mdss-dsi-panel-clockrate = <899000000>; }; + + 4k { + somc,mdss-mdp-kickoff-threshold-enable; + qcom,mdss-dsi-panel-width = <1080>; + qcom,mdss-dsi-panel-height = <3840>; + qcom,mdss-dsi-h-back-porch = <104>; + qcom,mdss-dsi-h-pulse-width = <40>; + qcom,mdss-dsi-h-front-porch = <140>; + qcom,mdss-dsi-v-back-porch = <10>; + qcom,mdss-dsi-v-pulse-width = <2>; + qcom,mdss-dsi-v-front-porch = <12>; + qcom,mdss-dsi-panel-framerate = <60>; + qcom,mdss-dsi-panel-timings = [00 1B 06 06 0B 11 05 07 05 03 04 00]; + qcom,mdss-dsi-t-clk-post = <0x07>; + qcom,mdss-dsi-t-clk-pre = <0x29>; + qcom,mdss-dsi-on-command = [ + 15 01 00 00 00 00 02 90 03 + 15 01 00 00 00 00 02 03 01 + 39 01 00 00 00 00 06 F0 55 AA 52 08 04 + 15 01 00 00 00 00 02 C0 03 + 39 01 00 00 00 00 06 F0 55 AA 52 08 07 + 15 01 00 00 00 00 02 EF 01 + 39 01 00 00 00 00 06 F0 55 AA 52 08 00 + 15 01 00 00 00 00 02 B4 01 + 15 01 00 00 00 00 02 35 00 + 39 01 00 00 00 00 03 44 00 00 + 39 01 00 00 00 00 06 F0 55 AA 52 08 01 + 39 01 00 00 00 00 03 D4 88 88 + 39 01 00 00 00 00 05 FF AA 55 A5 80 + 15 01 00 00 00 00 02 6F 01 + 15 01 00 00 00 00 02 F3 10 + 39 01 00 00 00 00 05 FF AA 55 A5 00]; + qcom,mdss-dsi-post-panel-on-command = [ + 05 01 00 00 00 00 01 29 + 05 01 00 00 43 00 01 11]; + qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; + + qcom,mdss-dsi-timing-switch-command = [ + 39 00 00 00 00 00 06 F0 55 AA 52 08 00 + 15 00 00 00 00 00 02 C9 01 + 15 00 00 00 00 00 02 90 03 + 15 00 00 00 00 00 02 58 00 + 15 00 00 00 00 00 02 03 01 + 39 00 00 00 00 00 06 F0 55 AA 52 08 04 + 15 01 00 00 00 00 02 C0 03 + ]; + qcom,mdss-dsi-timing-switch-command-state = + "dsi_lp_mode"; + + qcom,compression-mode = "dsc"; + qcom,config-select = <&dsi_dual_default_cmd_config1>; + + qcom,mdss-tear-check-sync-init-val = <3840>; + qcom,mdss-tear-check-start-pos = <3840>; + qcom,mdss-tear-check-rd-ptr-trigger-intr = <3841>; + + qcom,mdss-dsi-panel-clockrate = 
<899000000>; + }; }; }; @@ -579,6 +718,31 @@ qcom,mdss-dsi-panel-clockrate = <899000000>; }; + + 4k { + somc,mdss-mdp-kickoff-threshold-enable; + qcom,mdss-dsi-panel-width = <1080>; + qcom,mdss-dsi-panel-height = <3840>; + qcom,mdss-dsi-h-back-porch = <104>; + qcom,mdss-dsi-h-pulse-width = <40>; + qcom,mdss-dsi-h-front-porch = <140>; + qcom,mdss-dsi-v-back-porch = <10>; + qcom,mdss-dsi-v-pulse-width = <2>; + qcom,mdss-dsi-v-front-porch = <12>; + qcom,mdss-dsi-panel-framerate = <60>; + qcom,mdss-dsi-panel-timings = [00 1B 06 06 0B 11 05 07 05 03 04 00]; + qcom,mdss-dsi-t-clk-post = <0x07>; + qcom,mdss-dsi-t-clk-pre = <0x29>; + + qcom,compression-mode = "dsc"; + qcom,config-select = <&dsi_dual_default_cmd_config1>; + + qcom,mdss-tear-check-sync-init-val = <3840>; + qcom,mdss-tear-check-start-pos = <3840>; + qcom,mdss-tear-check-rd-ptr-trigger-intr = <3841>; + + qcom,mdss-dsi-panel-clockrate = <899000000>; + }; }; }; @@ -744,6 +908,63 @@ qcom,mdss-dsi-panel-clockrate = <899000000>; }; + + 4k { + qcom,mdss-dsi-panel-width = <1080>; + qcom,mdss-dsi-panel-height = <3840>; + qcom,mdss-dsi-h-back-porch = <104>; + qcom,mdss-dsi-h-pulse-width = <40>; + qcom,mdss-dsi-h-front-porch = <140>; + qcom,mdss-dsi-v-back-porch = <10>; + qcom,mdss-dsi-v-pulse-width = <2>; + qcom,mdss-dsi-v-front-porch = <12>; + qcom,mdss-dsi-panel-framerate = <60>; + qcom,mdss-dsi-panel-timings = [00 1B 06 06 0B 11 05 07 05 03 04 00]; + qcom,mdss-dsi-t-clk-post = <0x07>; + qcom,mdss-dsi-t-clk-pre = <0x29>; + qcom,mdss-dsi-on-command = [ + 15 01 00 00 00 00 02 90 03 + 15 01 00 00 00 00 02 03 01 + 39 01 00 00 00 00 06 F0 55 AA 52 08 04 + 15 01 00 00 00 00 02 C0 03 + 39 01 00 00 00 00 06 F0 55 AA 52 08 07 + 15 01 00 00 00 00 02 EF 01 + 39 01 00 00 00 00 06 F0 55 AA 52 08 00 + 15 01 00 00 00 00 02 B4 01 + 15 01 00 00 00 00 02 35 00 + 39 01 00 00 00 00 03 44 00 00 + 39 01 00 00 00 00 06 F0 55 AA 52 08 01 + 39 01 00 00 00 00 03 D4 88 88 + 39 01 00 00 00 00 05 FF AA 55 A5 80 + 15 01 00 00 00 00 02 6F 01 + 15 
01 00 00 00 00 02 F3 10 + 39 01 00 00 00 00 05 FF AA 55 A5 00]; + qcom,mdss-dsi-post-panel-on-command = [ + 05 01 00 00 00 00 01 29 + 05 01 00 00 43 00 01 11]; + qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; + + qcom,mdss-dsi-timing-switch-command = [ + 39 00 00 00 00 00 06 F0 55 AA 52 08 00 + 15 00 00 00 00 00 02 C9 01 + 15 00 00 00 00 00 02 90 03 + 15 00 00 00 00 00 02 58 00 + 15 00 00 00 00 00 02 03 01 + 39 00 00 00 00 00 06 F0 55 AA 52 08 04 + 15 01 00 00 00 00 02 C0 03 + ]; + qcom,mdss-dsi-timing-switch-command-state = + "dsi_lp_mode"; + + qcom,compression-mode = "dsc"; + qcom,config-select = <&dsi_dual_default_cmd_config1>; + + qcom,mdss-tear-check-sync-init-val = <3840>; + qcom,mdss-tear-check-start-pos = <3840>; + qcom,mdss-tear-check-rd-ptr-trigger-intr = <3841>; + + qcom,mdss-dsi-panel-clockrate = <899000000>; + }; }; }; @@ -836,6 +1057,30 @@ qcom,mdss-dsi-panel-clockrate = <899000000>; }; + + 4k { + qcom,mdss-dsi-panel-width = <1080>; + qcom,mdss-dsi-panel-height = <3840>; + qcom,mdss-dsi-h-back-porch = <104>; + qcom,mdss-dsi-h-pulse-width = <40>; + qcom,mdss-dsi-h-front-porch = <140>; + qcom,mdss-dsi-v-back-porch = <10>; + qcom,mdss-dsi-v-pulse-width = <2>; + qcom,mdss-dsi-v-front-porch = <12>; + qcom,mdss-dsi-panel-framerate = <60>; + qcom,mdss-dsi-panel-timings = [00 1B 06 06 0B 11 05 07 05 03 04 00]; + qcom,mdss-dsi-t-clk-post = <0x07>; + qcom,mdss-dsi-t-clk-pre = <0x29>; + + qcom,compression-mode = "dsc"; + qcom,config-select = <&dsi_dual_default_cmd_config1>; + + qcom,mdss-tear-check-sync-init-val = <3840>; + qcom,mdss-tear-check-start-pos = <3840>; + qcom,mdss-tear-check-rd-ptr-trigger-intr = <3841>; + + qcom,mdss-dsi-panel-clockrate = <899000000>; + }; }; }; }; diff --git a/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi index 51c4c8c5be22..aef9200f5d22 100644 --- a/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi +++ b/arch/arm/boot/dts/qcom/msm-audio-lpass.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 
2016-2017, 2019-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -293,16 +293,6 @@ qcom,msm-dai-q6-dev-id = <32770>; }; - proxy_rx: qcom,msm-dai-q6-proxy-rx { - compatible = "qcom,msm-dai-q6-dev"; - qcom,msm-dai-q6-dev-id = <8194>; - }; - - proxy_tx: qcom,msm-dai-q6-proxy-tx { - compatible = "qcom,msm-dai-q6-dev"; - qcom,msm-dai-q6-dev-id = <8195>; - }; - usb_audio_rx: qcom,msm-dai-q6-usb-audio-rx { compatible = "qcom,msm-dai-q6-dev"; qcom,msm-dai-q6-dev-id = <28672>; diff --git a/arch/arm/boot/dts/qcom/msm-pm660.dtsi b/arch/arm/boot/dts/qcom/msm-pm660.dtsi index 2082934112a0..460e7e76ac4d 100644 --- a/arch/arm/boot/dts/qcom/msm-pm660.dtsi +++ b/arch/arm/boot/dts/qcom/msm-pm660.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, 2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -629,25 +629,29 @@ #address-cells = <2>; #size-cells = <0>; - pm660_haptics: qcom,haptics@c000 { - compatible = "qcom,qpnp-haptics"; + pm660_haptics: qcom,haptic@c000 { + compatible = "qcom,qpnp-haptic"; reg = <0xc000 0x100>; interrupts = <0x1 0xc0 0x0 IRQ_TYPE_EDGE_BOTH>, - <0x1 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>; - interrupt-names = "hap-sc-irq", "hap-play-irq"; + <0x1 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>; + interrupt-names = "sc-irq", "play-irq"; qcom,pmic-revid = <&pm660_revid>; qcom,pmic-misc = <&pm660_misc>; qcom,misc-clk-trim-error-reg = <0xf3>; - qcom,actuator-type = <0>; + qcom,actuator-type = "lra"; qcom,play-mode = "direct"; qcom,vmax-mv = <3200>; qcom,ilim-ma = <800>; - qcom,sc-dbc-cycles = <8>; + qcom,wave-shape = "square"; qcom,wave-play-rate-us = <6667>; + qcom,int-pwm-freq-khz = <505>; + qcom,sc-deb-cycles = <8>; qcom,en-brake; + qcom,brake-pattern = [03 03 00 00]; qcom,lra-high-z = "opt0"; qcom,lra-auto-res-mode = "qwd"; - qcom,lra-res-cal-period = <4>; + qcom,lra-calibrate-at-eop = <0>; + qcom,correct-lra-drive-freq; }; }; }; diff --git a/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi b/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi index b710d5451bde..4c049c8007cc 100644 --- a/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi +++ b/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, 2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -638,25 +638,26 @@ status = "okay"; }; - pmi8998_haptics: qcom,haptics@c000 { - compatible = "qcom,qpnp-haptics"; + pmi8998_haptics: qcom,haptic@c000 { + status = "disabled"; + compatible = "qcom,qpnp-haptic"; reg = <0xc000 0x100>; interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_BOTH>, <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>; - interrupt-names = "hap-sc-irq", "hap-play-irq"; + interrupt-names = "sc-irq", "play-irq"; qcom,pmic-revid = <&pmi8998_revid>; qcom,pmic-misc = <&pmi8998_misc>; qcom,misc-clk-trim-error-reg = <0xf3>; - qcom,int-pwm-freq-khz = <505>; + qcom,actuator-type = "lra"; qcom,play-mode = "direct"; - qcom,wave-play-rate-us = <6667>; - qcom,actuator-type = <0>; - qcom,wave-shape = "square"; qcom,vmax-mv = <3200>; qcom,ilim-ma = <800>; + qcom,wave-shape = "square"; + qcom,wave-play-rate-us = <6667>; + qcom,int-pwm-freq-khz = <505>; qcom,sc-deb-cycles = <8>; qcom,en-brake; - qcom,brake-pattern = <0x3 0x3 0x0 0x0>; + qcom,brake-pattern = [03 03 00 00]; qcom,lra-high-z = "opt1"; qcom,lra-auto-res-mode = "qwd"; qcom,lra-res-cal-period = <4>; diff --git a/arch/arm/boot/dts/qcom/msm8916.dtsi b/arch/arm/boot/dts/qcom/msm8916.dtsi index 5cac11e6ba4b..8d184ff19642 100644 --- a/arch/arm/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm/boot/dts/qcom/msm8916.dtsi @@ -25,8 +25,8 @@ #size-cells = <2>; aliases { - mmc0 = &sdhc_1; /* SDC1 eMMC slot */ - mmc1 = &sdhc_2; /* SDC2 SD card slot */ + sdhc1 = &sdhc_1; /* SDC1 eMMC slot */ + sdhc2 = &sdhc_2; /* SDC2 SD card slot */ }; chosen { }; diff --git a/arch/arm/boot/dts/qcom/msm8998-audio.dtsi b/arch/arm/boot/dts/qcom/msm8998-audio.dtsi index 0481f8a6941c..2e37ff6fe953 100644 --- a/arch/arm/boot/dts/qcom/msm8998-audio.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-audio.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, 2019-2020 The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -122,8 +122,7 @@ <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>, <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>, <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>, - <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>, - <&proxy_rx>, <&proxy_tx>; + <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>; asoc-cpu-names = "msm-dai-q6-hdmi.8", "msm-dai-q6-dp.24608", "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1", "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3", @@ -145,8 +144,7 @@ "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897", - "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913", - "msm-dai-q6-dev.8194", "msm-dai-q6-dev.8195"; + "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913"; asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>; asoc-codec-names = "msm-stub-codec.1", "msm-ext-disp-audio-codec-rx"; @@ -257,8 +255,7 @@ <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>, <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>, <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>, - <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>, - <&proxy_rx>, <&proxy_tx>; + <&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>; asoc-cpu-names = "msm-dai-q6-hdmi.8", "msm-dai-q6-dp.24608", "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1", "msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3", @@ -280,8 +277,7 @@ "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897", - "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913", - "msm-dai-q6-dev.8194", "msm-dai-q6-dev.8195"; + "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913"; asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>; asoc-codec-names = "msm-stub-codec.1", "msm-ext-disp-audio-codec-rx"; diff --git a/arch/arm/boot/dts/qcom/msm8998-v2.1-yoshino-poplar_kddi.dts 
b/arch/arm/boot/dts/qcom/msm8998-v2.1-yoshino-poplar_kddi.dts deleted file mode 100644 index 40b8fbcddbbf..000000000000 --- a/arch/arm/boot/dts/qcom/msm8998-v2.1-yoshino-poplar_kddi.dts +++ /dev/null @@ -1,32 +0,0 @@ -/* Copyright (c) 2017, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -/* - * Copyright (C) 2017 Sony Mobile Communications Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2, as - * published by the Free Software Foundation. 
- */ - - -/dts-v1/; - -#include "msm8998-v2.1.dtsi" -#include "msm8998-mdss-panels.dtsi" -#include "msm8998-mtp.dtsi" -#include "msm8998-yoshino-poplar_kddi.dtsi" - -/ { - model = "SoMC Poplar-KDDI(MSM8998 v2.1)"; - compatible = "somc,poplar-kddi", "qcom,msm8998"; - qcom,board-id = <8 0>; -}; diff --git a/arch/arm/boot/dts/qcom/msm8998-v2.dtsi b/arch/arm/boot/dts/qcom/msm8998-v2.dtsi index 4b59bc940315..abc0247b4475 100644 --- a/arch/arm/boot/dts/qcom/msm8998-v2.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-v2.dtsi @@ -220,6 +220,9 @@ &msm_cpufreq { qcom,cpufreq-table-0 = + < 300000 >, + < 364800 >, + < 441600 >, < 518400 >, < 595200 >, < 672000 >, @@ -241,6 +244,13 @@ < 1900800 >; qcom,cpufreq-table-4 = + < 300000 >, + < 345600 >, + < 422400 >, + < 499200 >, + < 576000 >, + < 652800 >, + < 729600 >, < 806400 >, < 902400 >, < 979200 >, @@ -266,7 +276,10 @@ < 2342400 >, < 2361600 >, < 2419200 >, - < 2457600 >; + < 2457600 >, + < 2476800 >, + < 2496000 >, + < 2592000 >; }; &bwmon { diff --git a/arch/arm/boot/dts/qcom/msm8998-yoshino-common.dtsi b/arch/arm/boot/dts/qcom/msm8998-yoshino-common.dtsi index bdfedbec7eb4..369c33f59d3e 100644 --- a/arch/arm/boot/dts/qcom/msm8998-yoshino-common.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-yoshino-common.dtsi @@ -29,7 +29,7 @@ label = "debug_mem"; }; - pstore_reserve_mem: pstore_reserve_mem_region@ffc00000 { + pstore_reserve_mem: pstore_reserve_mem_region_region@ffc00000 { compatible = "removed-dma-pool"; no-map; reg = <0 0xffc00000 0 0x00100000>; @@ -173,12 +173,6 @@ preset_x_max = <2159>; preset_y_max = <3839>; preset_n_fingers = <10>; - /* Stamina Mode */ - stamina_mode_supported = <0x80000000>; - /* F01_RMI_CTRL05: Doze Holdoff */ - doze_default_time = <35>; - doze_glove_mode_time = <35>; - doze_cover_mode_time = <35>; wakeup_gesture { double_tap { gesture_code = <0x0003>; @@ -3468,15 +3462,6 @@ }; }; -&mdss_hdmi_tx { - /delete-property/ pinctrl-names; - /delete-property/ pinctrl-0; - /delete-property/ pinctrl-1; - 
/delete-property/ pinctrl-2; - /delete-property/ pinctrl-3; - /delete-property/ pinctrl-4; -}; - &pmx_mdss { mdss_dsi_active: mdss_dsi_active { mux { @@ -3625,7 +3610,7 @@ linux,name = "wled"; qcom,fs-curr-ua = <24000>; qcom,led-strings-list = [00 01 02]; - somc,init-br-ua = <800>; + somc,init-br-ua = <10000>; somc-s1,br-power-save-ua = <800>; somc,bl-scale-enabled; somc,area_count_table_size = <0>; diff --git a/arch/arm/boot/dts/qcom/msm8998-yoshino-lilac_common.dtsi b/arch/arm/boot/dts/qcom/msm8998-yoshino-lilac_common.dtsi index b3620aca2dbf..d44a22001e2b 100644 --- a/arch/arm/boot/dts/qcom/msm8998-yoshino-lilac_common.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-yoshino-lilac_common.dtsi @@ -27,6 +27,13 @@ preset_x_max = <719>; preset_y_max = <1279>; + /* Stamina Mode */ + stamina_mode_supported = <0x80000003>; + + /* F01_RMI_CTRL05: Doze Holdoff */ + doze_default_time = <35>; + doze_glove_mode_time = <35>; + doze_cover_mode_time = <35>; }; }; @@ -403,6 +410,7 @@ somc,area_count_table = <0 137 273 410 546 683 819 956 1092 1229 1365 1638 1911 2184 2457 2730 3003 3276 3549 3822 4095>; + somc,init-br-ua = <8500>; }; &red_led { diff --git a/arch/arm/boot/dts/qcom/msm8998-yoshino-maple_common.dtsi b/arch/arm/boot/dts/qcom/msm8998-yoshino-maple_common.dtsi index 88721eb338e4..dd6bd6ef14d2 100644 --- a/arch/arm/boot/dts/qcom/msm8998-yoshino-maple_common.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-yoshino-maple_common.dtsi @@ -44,6 +44,13 @@ synaptics_clearpad@2c { chip_id = <0x38>; + /* Stamina Mode */ + stamina_mode_supported = <0x80000003>; + + /* F01_RMI_CTRL05: Doze Holdoff */ + doze_default_time = <35>; + doze_glove_mode_time = <35>; + doze_cover_mode_time = <35>; }; }; diff --git a/arch/arm/boot/dts/qcom/msm8998-yoshino-poplar_common.dtsi b/arch/arm/boot/dts/qcom/msm8998-yoshino-poplar_common.dtsi index e1b71fa31cbd..09ade3662043 100644 --- a/arch/arm/boot/dts/qcom/msm8998-yoshino-poplar_common.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-yoshino-poplar_common.dtsi @@ 
-27,6 +27,13 @@ preset_x_max = <1079>; preset_y_max = <1919>; + /* Stamina Mode */ + stamina_mode_supported = <0x80000003>; + + /* F01_RMI_CTRL05: Doze Holdoff */ + doze_default_time = <35>; + doze_glove_mode_time = <35>; + doze_cover_mode_time = <35>; }; }; diff --git a/arch/arm/boot/dts/qcom/msm8998-yoshino-poplar_jp-common.dtsi b/arch/arm/boot/dts/qcom/msm8998-yoshino-poplar_jp-common.dtsi deleted file mode 100644 index a3293d69020d..000000000000 --- a/arch/arm/boot/dts/qcom/msm8998-yoshino-poplar_jp-common.dtsi +++ /dev/null @@ -1,251 +0,0 @@ -/* arch/arm64/boot/dts/qcom/msm8998-yoshino-poplar_jp-common.dtsi - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -/* - * Copyright (C) 2017 Sony Mobile Communications Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2, as - * published by the Free Software Foundation. 
- */ - -#include "msm8998-yoshino-poplar_common.dtsi" - -&soc { - somc_pinctrl: somc_pinctrl { - pinctrl-2 = <&msm_gpio_37 &msm_gpio_49 &msm_gpio_50 &msm_gpio_51 &msm_gpio_52 - &msm_gpio_90 &msm_gpio_91 &msm_gpio_92 &msm_gpio_105 &msm_gpio_106 - &msm_gpio_107 &msm_gpio_127 &msm_gpio_136 >; - }; - - /* SPI: BLSP9 */ - spi@c1b7000 { /* BLSP2 QUP3 */ - pinctrl-0 = <&msm_gpio_49 &msm_gpio_50 &msm_gpio_51 &msm_gpio_52>; - pinctrl-1 = <&msm_gpio_49 &msm_gpio_50 &msm_gpio_51 &msm_gpio_52>; - qcom,clk-freq-out = <30000000>; - status = "okay"; - }; - - felica,pm-ops { - compatible = "sony,cxd224x-pm-ops"; - }; -}; - -&pm8998_gpios { - /* GPIO_14: DIV_CLK2 */ - gpio@cd00 { - qcom,master-en = <1>; /* Enable */ - status = "okay"; - }; -}; - -&tlmm{ -/* GPIO_37 : NC */ - msm_gpio_37: msm_gpio_37 { - mux { - pins = "gpio37"; - function = "gpio"; - }; - - config { - pins = "gpio37"; - drive-strength = <2>; - /delete-property/ bias-disable; - bias-pull-up; - /delete-property/ output-low; - input-enable; - }; - }; - -/* GPIO_49 : DTV_SPI_MOSI */ - msm_gpio_49: msm_gpio_49 { - mux { - pins = "gpio49"; - function = "blsp_spi9"; - }; - - config { - pins = "gpio49"; - drive-strength = <2>; - /delete-property/ output-low; - bias-disable; - }; - }; - -/* GPIO_50 : DTV_SPI_MISO */ - msm_gpio_50: msm_gpio_50 { - mux { - pins = "gpio50"; - function = "blsp_spi9"; - }; - - config { - pins = "gpio50"; - drive-strength = <2>; - /delete-property/ bias-disable; - bias-pull-down; - /delete-property/ output-low; - }; - }; - -/* GPIO_51 : DTV_SPI_CS_N */ - msm_gpio_51: msm_gpio_51 { - mux { - pins = "gpio51"; - function = "blsp_spi9"; - }; - - config { - pins = "gpio51"; - drive-strength = <2>; - bias-disable; - /delete-property/ output-low; - }; - }; - -/* GPIO_52 : DTV_SPI_CLK */ - msm_gpio_52: msm_gpio_52 { - mux { - pins = "gpio52"; - function = "blsp_spi9"; - }; - - config { - pins = "gpio52"; - drive-strength = <2>; - bias-disable; - /delete-property/ output-low; - }; - }; - -/* GPIO_90 : 
DTV_RST_N */ - msm_gpio_90: msm_gpio_90 { - mux { - pins = "gpio90"; - function = "gpio"; - }; - - config { - pins = "gpio90"; - drive-strength = <2>; - bias-disable; - output-low; - }; - }; - -/* GPIO_91 : DTV_INT */ - msm_gpio_91: msm_gpio_91 { - mux { - pins = "gpio91"; - function = "gpio"; - }; - - config { - pins = "gpio91"; - drive-strength = <2>; - /delete-property/ bias-disable; - bias-pull-down; - /delete-property/ output-low; - input-enable; - }; - }; - -/* GPIO_92 : NFC_IRQ_FELICA_INT_N */ - msm_gpio_92: msm_gpio_92 { - mux { - pins = "gpio92"; - function = "gpio"; - }; - - config { - pins = "gpio92"; - drive-strength = <2>; - /delete-property/ bias-pull-down; - bias-disable; - input-enable; - }; - }; - -/* GPIO_105 : NC */ - msm_gpio_105: msm_gpio_105 { - mux { - pins = "gpio105"; - function = "gpio"; - }; - - config { - pins = "gpio105"; - drive-strength = <2>; - bias-disable; - output-low; - }; - }; - -/* GPIO_106 : NC */ - msm_gpio_106: msm_gpio_106 { - mux { - pins = "gpio106"; - function = "gpio"; - }; - - config { - pins = "gpio106"; - drive-strength = <2>; - bias-disable; - output-low; - }; - }; - -/* GPIO_107 : NC */ - msm_gpio_107: msm_gpio_107 { - mux { - pins = "gpio107"; - function = "gpio"; - }; - - config { - pins = "gpio107"; - drive-strength = <2>; - bias-disable; - output-low; - }; - }; - -/* GPIO_127 : NC */ - msm_gpio_127: msm_gpio_127 { - mux { - pins = "gpio127"; - function = "gpio"; - }; - - config { - pins = "gpio127"; - drive-strength = <2>; - bias-disable; - output-low; - }; - }; - -/* GPIO_136 : NC */ - msm_gpio_136: msm_gpio_136 { - mux { - pins = "gpio136"; - function = "gpio"; - }; - - config { - pins = "gpio136"; - drive-strength = <2>; - bias-disable; - output-low; - }; - }; -}; diff --git a/arch/arm/boot/dts/qcom/msm8998-yoshino-poplar_kddi.dtsi b/arch/arm/boot/dts/qcom/msm8998-yoshino-poplar_kddi.dtsi deleted file mode 100644 index 58b69cc411a7..000000000000 --- a/arch/arm/boot/dts/qcom/msm8998-yoshino-poplar_kddi.dtsi 
+++ /dev/null @@ -1,233 +0,0 @@ -/* arch/arm64/boot/dts/qcom/msm8998-yoshino-poplar_kddi.dtsi - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -/* - * Copyright (C) 2017 Sony Mobile Communications Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2, as - * published by the Free Software Foundation. - */ - -#include "msm8998-yoshino-poplar_jp-common.dtsi" - -&soc { - /* I2C: BLSP7 */ - i2c@c1b5000 { /* BLSP2 QUP1 */ - felica_ldo@1e { - compatible = "rohm,bd7602"; - reg = <0x1e>; - }; - felica@29 { - compatible = "sony,cxd224x-i2c"; - reg = <0x29>; - interrupt-parent = <&tlmm>; - interrupts = <92 0x2002>; - sony,nfc_int = <&tlmm 92 0>; - sony,nfc_wake = <&tlmm 93 0>; - - pinctrl-names = "felica_active","felica_suspend"; - pinctrl-0 = <&msm_gpio_92>; - pinctrl-1 = <&msm_gpio_92>; - - /* Defined in Clock Distribution */ - clocks = <&clock_gcc clk_ln_bb_clk3_pin>; - clock-names = "felica_clk"; - }; - }; - - /* SPI: BLSP9 */ - spi@c1b7000 { /* BLSP2 QUP3 */ - pinctrl-0 = <&spi_9_active &spi_9_miso_active>; - pinctrl-1 = <&spi_9_sleep &spi_9_miso_sleep>; - qcom,clk-freq-out = <30000000>; - qcom,infinite-mode = <0>; - qcom,gpio-clk = <&tlmm 52 0>; - qcom,gpio-cs0 = <&tlmm 51 0>; - qcom,gpio-mosi = <&tlmm 49 0>; - qcom,gpio-miso = <&tlmm 50 0>; - qcom,rt-priority; - status = "okay"; - - tmm3spi@0 { - compatible = "socionext,mn553-spi"; - reg = <0>; - spi-max-frequency = <30000000>; - spi-mode = <0>; - }; - }; - - dtv_tuner: mn88553 { - compatible = 
"socionext,mn88553"; - interrupt-parent = <&tlmm>; - interrupts = <91 0>; - gpios = <&pm8998_gpios 20 0>, /* DTV_ACTIVE */ - <&tlmm 90 0>, /* DTV_RESET_N */ - <&tlmm 91 0>; /* DTV_INT */ - }; - - tlmm: pinctrl@03400000 { - spi_9 { - spi_9_active: spi_9_active { - qcom,pin-func = <1>; - mux { - pins = "gpio49", "gpio51", "gpio52"; - function = "blsp_spi9"; - }; - - config { - pins = "gpio49", "gpio51", "gpio52"; - drive-strength = <2>; - bias-disable; - }; - }; - - spi_9_sleep: spi_9_sleep { - qcom,pin-func = <1>; - mux { - pins = "gpio49", "gpio51", "gpio52"; - function = "blsp_spi9"; - }; - - config { - pins = "gpio49", "gpio51", "gpio52"; - drive-strength = <2>; - bias-disable; - }; - }; - }; - - spi_9_miso { - spi_9_miso_active: spi_9_miso_active { - qcom,pin-func = <1>; - mux { - pins = "gpio50"; - function = "blsp_spi9"; - }; - - config { - pins = "gpio50"; - drive-strength = <2>; - bias-pull-down; - }; - }; - - spi_9_miso_sleep: spi_9_miso_sleep { - qcom,pin-func = <1>; - mux { - pins = "gpio50"; - function = "blsp_spi9"; - }; - - config { - pins = "gpio50"; - drive-strength = <2>; - bias-pull-down; - }; - }; - }; - }; -}; - -&tlmm { - /* GPIO_49 : DTV_SPI_MOSI */ - msm_gpio_49: msm_gpio_49 { - mux { - pins = "gpio49"; - function = "blsp_spi9"; - }; - - config { - pins = "gpio49"; - drive-strength = <2>; - bias-disable; - /delete-property/ output-low; - }; - }; - - /* GPIO_50 : DTV_SPI_MISO */ - msm_gpio_50: msm_gpio_50{ - mux { - pins = "gpio50"; - function = "blsp_spi9"; - }; - - config { - pins = "gpio50"; - drive-strength = <2>; - /delete-property/ bias-disable; - bias-pull-down; - /delete-property/ output-low; - }; - }; - - /* GPIO_51 : DTV_SPI_CS_N */ - msm_gpio_51: msm_gpio_51{ - mux { - pins = "gpio51"; - function = "blsp_spi9"; - }; - - config { - pins = "gpio51"; - drive-strength = <2>; - bias-disable; - /delete-property/ output-low; - }; - }; - - /* GPIO_52 : DTV_SPI_CLK */ - msm_gpio_52: msm_gpio_52{ - mux { - pins = "gpio52"; - function = 
"blsp_spi9"; - }; - - config { - pins = "gpio52"; - drive-strength = <2>; - bias-disable; - /delete-property/ output-low; - }; - }; - - /* GPIO_90 : DTV_RST_N */ - msm_gpio_90: msm_gpio_90 { - mux { - pins="gpio90"; - function = "gpio"; - }; - - config { - pins = "gpio90"; - drive-strength = <2>; - bias-disable; - output-low; - }; - }; - - /* GPIO_91 : DTV_INT */ - msm_gpio_91: msm_gpio_91 { - mux { - pins = "gpio91"; - function = "gpio"; - }; - - config { - pins = "gpio91"; - drive-strength = <2>; - /delete-property/ bias-disable; - bias-pull-down; - /delete-property/ output-low; - input-enable; - }; - }; -}; diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi index 4c04c465b811..873ad82e7e78 100644 --- a/arch/arm/boot/dts/qcom/msm8998.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998.dtsi @@ -56,7 +56,6 @@ efficiency = <1024>; next-level-cache = <&L2_0>; qcom,ea = <&ea0>; - sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; L2_0: l2-cache { compatible = "arm,arch-cache"; cache-level = <2>; @@ -85,7 +84,6 @@ efficiency = <1024>; next-level-cache = <&L2_0>; qcom,ea = <&ea1>; - sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; L1_I_1: l1-icache { compatible = "arm,arch-cache"; qcom,dump-size = <0x9040>; @@ -109,7 +107,6 @@ efficiency = <1024>; next-level-cache = <&L2_0>; qcom,ea = <&ea2>; - sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; L1_I_2: l1-icache { compatible = "arm,arch-cache"; qcom,dump-size = <0x9040>; @@ -133,7 +130,6 @@ efficiency = <1024>; next-level-cache = <&L2_0>; qcom,ea = <&ea3>; - sched-energy-costs = <&CPU_COST_0 &CLUSTER_COST_0>; L1_I_3: l1-icache { compatible = "arm,arch-cache"; qcom,dump-size = <0x9040>; @@ -157,7 +153,6 @@ efficiency = <1536>; next-level-cache = <&L2_1>; qcom,ea = <&ea4>; - sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; L2_1: l2-cache { compatible = "arm,arch-cache"; cache-level = <2>; @@ -185,7 +180,6 @@ efficiency = <1536>; next-level-cache = <&L2_1>; qcom,ea = <&ea5>; - 
sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; L1_I_101: l1-icache { compatible = "arm,arch-cache"; qcom,dump-size = <0x12000>; @@ -209,7 +203,6 @@ efficiency = <1536>; next-level-cache = <&L2_1>; qcom,ea = <&ea6>; - sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; L1_I_102: l1-icache { compatible = "arm,arch-cache"; qcom,dump-size = <0x12000>; @@ -233,7 +226,6 @@ efficiency = <1536>; next-level-cache = <&L2_1>; qcom,ea = <&ea7>; - sched-energy-costs = <&CPU_COST_1 &CLUSTER_COST_1>; L1_I_103: l1-icache { compatible = "arm,arch-cache"; qcom,dump-size = <0x12000>; @@ -284,122 +276,6 @@ }; }; }; - energy-costs { - CPU_COST_0: core-cost0 { - busy-cost-data = < - 113 30 - 130 33 - 147 35 - 164 36 - 181 42 - 194 47 - 211 54 - 228 62 - 243 67 - 258 73 - 275 79 - 292 88 - 308 95 - 326 104 - 342 111 - 368 134 - 384 155 - 401 178 - 419 201 - >; - idle-cost-data = < - 4 4 0 0 - >; - }; - CPU_COST_1: core-cost1 { - busy-cost-data = < - 344 162 - 391 181 - 419 196 - 453 214 - 487 229 - 509 248 - 546 280 - 581 316 - 615 354 - 650 392 - 676 439 - 712 495 - 739 565 - 776 622 - 803 691 - 834 792 - 881 889 - 914 1059 - 957 1244 - 975 1375 - 996 1549 - 1016 1617 - 1021 1677 - 1024 1683 - >; - idle-cost-data = < - 10 10 0 0 - >; - }; - CLUSTER_COST_0: cluster-cost0 { - busy-cost-data = < - 113 20 - 130 21 - 147 22 - 164 23 - 181 24 - 194 27 - 211 29 - 228 30 - 243 32 - 258 33 - 275 35 - 292 38 - 308 39 - 326 42 - 342 46 - 368 48 - 384 53 - 401 59 - 419 66 - >; - idle-cost-data = < - 31 31 31 0 - >; - }; - CLUSTER_COST_1: cluster-cost1 { - busy-cost-data = < - 344 37 - 391 38 - 419 40 - 453 43 - 487 44 - 509 46 - 546 50 - 581 54 - 615 60 - 650 63 - 676 70 - 712 74 - 739 80 - 776 87 - 803 96 - 834 104 - 881 120 - 914 130 - 957 171 - 975 178 - 996 185 - 1016 200 - 1021 202 - 1024 203 - >; - idle-cost-data = < - 50 50 50 0 - >; - }; - }; }; soc: soc { }; diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts 
index 363f742e1905..9825176351e3 100644 --- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts @@ -227,21 +227,17 @@ }; }; -&ion { - /delete-node/ qcom,ion-heap@25; - - system_heap: qcom,ion-heap@25 { - reg = <25>; - qcom,ion-heap-type = "SYSTEM"; - }; -}; - &reserved_memory { pmem_shared: pmem_shared_region { reg = <0 0xd0000000 0 0x30000000>; label = "pmem_shared_mem"; }; + ion_system: ion_system_region { + reg = <0x1 0x0 0 0x10000000>; + label = "ion_system_mem"; + }; + ion_audio: ion_audio_region { reg = <0 0xc8000000 0 0x00400000>; label = "ion_audio_mem"; diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index 92df0fcf2bf4..fb0d1b252dc8 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi @@ -1363,7 +1363,7 @@ 0xffffffff 0x3ffcfe7c 0x1c010101 /* pioA */ 0x7fffffff 0xfffccc3a 0x3f00cc3a /* pioB */ 0xffffffff 0x3ff83fff 0xff00ffff /* pioC */ - 0xb003ff00 0x8002a800 0x00000000 /* pioD */ + 0x0003ff00 0x8002a800 0x00000000 /* pioD */ 0xffffffff 0x7fffffff 0x76fff1bf /* pioE */ >; diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi index 4e4166d96b26..118135d75899 100644 --- a/arch/arm/boot/dts/spear3xx.dtsi +++ b/arch/arm/boot/dts/spear3xx.dtsi @@ -53,7 +53,7 @@ }; gmac: eth@e0800000 { - compatible = "snps,dwmac-3.40a"; + compatible = "st,spear600-gmac"; reg = <0xe0800000 0x8000>; interrupts = <23 22>; interrupt-names = "macirq", "eth_wake_irq"; diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi index c70d1ec02957..13d4e6185275 100644 --- a/arch/arm/boot/dts/tegra20-tamonten.dtsi +++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi @@ -180,9 +180,8 @@ nvidia,pins = "ata", "atb", "atc", "atd", "ate", "cdev1", "cdev2", "dap1", "dtb", "gma", "gmb", "gmc", "gmd", "gme", "gpu7", - "gpv", "i2cp", "irrx", "irtx", "pta", - "rm", "slxa", "slxk", "spia", "spib", - "uac"; + "gpv", "i2cp", 
"pta", "rm", "slxa", + "slxk", "spia", "spib", "uac"; nvidia,pull = ; nvidia,tristate = ; }; @@ -207,7 +206,7 @@ conf_ddc { nvidia,pins = "ddc", "dta", "dtd", "kbca", "kbcb", "kbcc", "kbcd", "kbce", "kbcf", - "sdc", "uad", "uca"; + "sdc"; nvidia,pull = ; nvidia,tristate = ; }; @@ -217,9 +216,10 @@ "lvp0", "owc", "sdb"; nvidia,tristate = ; }; - conf_sdd { - nvidia,pins = "sdd", "spic", "spie", "spih", - "uaa", "uab", "ucb"; + conf_irrx { + nvidia,pins = "irrx", "irtx", "sdd", "spic", + "spie", "spih", "uaa", "uab", "uad", + "uca", "ucb"; nvidia,pull = ; nvidia,tristate = ; }; diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts index 9bedd2478787..3279bf1a17a1 100644 --- a/arch/arm/boot/dts/versatile-ab.dts +++ b/arch/arm/boot/dts/versatile-ab.dts @@ -93,15 +93,16 @@ #size-cells = <1>; ranges; - vic: interrupt-controller@10140000 { + vic: intc@10140000 { compatible = "arm,versatile-vic"; interrupt-controller; #interrupt-cells = <1>; reg = <0x10140000 0x1000>; + clear-mask = <0xffffffff>; valid-mask = <0xffffffff>; }; - sic: interrupt-controller@10003000 { + sic: intc@10003000 { compatible = "arm,versatile-sic"; interrupt-controller; #interrupt-cells = <1>; diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts index 3a23164c2c2d..33a8eb28374e 100644 --- a/arch/arm/boot/dts/versatile-pb.dts +++ b/arch/arm/boot/dts/versatile-pb.dts @@ -6,7 +6,7 @@ amba { /* The Versatile PB is using more SIC IRQ lines than the AB */ - sic: interrupt-controller@10003000 { + sic: intc@10003000 { clear-mask = <0xffffffff>; /* * Valid interrupt lines mask according to diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 4bb55e4ce5d7..3cadb726ec88 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -257,14 +257,6 @@ tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr tlb_add_flush(tlb, addr); } -static inline void -tlb_flush_pmd_range(struct mmu_gather 
*tlb, unsigned long address, - unsigned long size) -{ - tlb_add_flush(tlb, address); - tlb_add_flush(tlb, address + size - PMD_SIZE); -} - #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) #define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index a43601d61ce0..649bc3300c93 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -16,14 +16,10 @@ CFLAGS_REMOVE_return_address.o = -pg # Object file lists. obj-y := elf.o entry-common.o irq.o opcodes.o \ - process.o ptrace.o reboot.o \ + process.o ptrace.o reboot.o return_address.o \ setup.o signal.o sigreturn_codes.o \ stacktrace.o sys_arm.o time.o traps.o -ifneq ($(CONFIG_ARM_UNWIND),y) -obj-$(CONFIG_FRAME_POINTER) += return_address.o -endif - obj-$(CONFIG_ATAGS) += atags_parse.o obj-$(CONFIG_ATAGS_PROC) += atags_proc.o obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 618ceb6fe674..3ce377f7251f 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -625,9 +625,11 @@ call_fpe: tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2 reteq lr and r8, r0, #0x00000f00 @ mask out CP number + THUMB( lsr r8, r8, #8 ) mov r7, #1 - add r6, r10, r8, lsr #8 @ add used_cp[] array offset first - strb r7, [r6, #TI_USED_CP] @ set appropriate used_cp[] + add r6, r10, #TI_USED_CP + ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[] + THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[] #ifdef CONFIG_IWMMXT @ Test if we need to give access to iWMMXt coprocessors ldr r5, [r10, #TI_FLAGS] @@ -636,7 +638,7 @@ call_fpe: bcs iwmmxt_task_enable #endif ARM( add pc, pc, r8, lsr #6 ) - THUMB( lsr r8, r8, #6 ) + THUMB( lsl r8, r8, #2 ) THUMB( add pc, r8 ) nop diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 
2e336acd68b0..04286fd9e09c 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -673,8 +673,12 @@ ARM_BE8(rev16 ip, ip) ldrcc r7, [r4], #4 @ use branch for delay slot bcc 1b bx lr +#else +#ifdef CONFIG_CPU_ENDIAN_BE8 + moveq r0, #0x00004000 @ set bit 22, mov to mvn instruction #else moveq r0, #0x400000 @ set bit 22, mov to mvn instruction +#endif b 2f 1: ldr ip, [r7, r3] #ifdef CONFIG_CPU_ENDIAN_BE8 @@ -683,7 +687,7 @@ ARM_BE8(rev16 ip, ip) tst ip, #0x000f0000 @ check the rotation field orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24 biceq ip, ip, #0x00004000 @ clear bit 22 - orreq ip, ip, r0, ror #8 @ mask in offset bits 7-0 + orreq ip, ip, r0 @ mask in offset bits 7-0 #else bic ip, ip, #0x000000ff tst ip, #0xf00 @ check the rotation field diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index f945742dea44..36ed35073289 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -10,6 +10,8 @@ */ #include #include + +#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) #include #include @@ -54,4 +56,6 @@ void *return_address(unsigned int level) return NULL; } +#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */ + EXPORT_SYMBOL_GPL(return_address); diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 18013f7e2b31..1ad40fc316b2 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -488,11 +488,9 @@ void notrace cpu_init(void) * In Thumb-2, msr with an immediate value is not allowed. 
*/ #ifdef CONFIG_THUMB2_KERNEL -#define PLC_l "l" -#define PLC_r "r" +#define PLC "r" #else -#define PLC_l "I" -#define PLC_r "I" +#define PLC "I" #endif /* @@ -514,15 +512,15 @@ void notrace cpu_init(void) "msr cpsr_c, %9" : : "r" (stk), - PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), + PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), "I" (offsetof(struct stack, irq[0])), - PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE), + PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE), "I" (offsetof(struct stack, abt[0])), - PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE), + PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE), "I" (offsetof(struct stack, und[0])), - PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE), + PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE), "I" (offsetof(struct stack, fiq[0])), - PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE) + PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE) : "r14"); #endif } diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index f82a1ac22164..7abc908ebea0 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -626,20 +626,18 @@ struct page *get_signal_page(void) addr = page_address(page); - /* Poison the entire page */ - memset32(addr, __opcode_to_mem_arm(0xe7fddef1), - PAGE_SIZE / sizeof(u32)); - /* Give the signal return code some randomness */ offset = 0x200 + (get_random_int() & 0x7fc); signal_return_offset = offset; - /* Copy signal return handlers into the page */ + /* + * Copy signal return handlers into the vector page, and + * set sigreturn to be a pointer to these. 
+ */ memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); - /* Flush out all instructions in this page */ - ptr = (unsigned long)addr; - flush_icache_range(ptr, ptr + PAGE_SIZE); + ptr = (unsigned long)addr + offset; + flush_icache_range(ptr, ptr + sizeof(sigreturn_codes)); return page; } diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 30bb8c972553..b75332da3bb1 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -52,7 +52,8 @@ int notrace unwind_frame(struct stackframe *frame) frame->sp = frame->fp; frame->fp = *(unsigned long *)(fp); - frame->pc = *(unsigned long *)(fp + 4); + frame->pc = frame->lr; + frame->lr = *(unsigned long *)(fp + 4); #else /* check current frame pointer is within bounds */ if (fp < low + 12 || fp > high - 4) diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 28c758bafb43..5979436048d6 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -1281,7 +1281,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, return -EFAULT; } - if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) { + if (is_vm_hugetlb_page(vma) && !logging_active) { hugetlb = true; gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; } else { @@ -1801,7 +1801,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, * Prevent userspace from creating a memory region outside of the IPA * space addressable by the KVM guest IPA space. 
*/ - if (memslot->base_gfn + memslot->npages > + if (memslot->base_gfn + memslot->npages >= (KVM_PHYS_SIZE >> PAGE_SHIFT)) return -EFAULT; diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c index fd6c9169fa78..96a3d73ef4bf 100644 --- a/arch/arm/mach-footbridge/dc21285.c +++ b/arch/arm/mach-footbridge/dc21285.c @@ -69,15 +69,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where, if (addr) switch (size) { case 1: - asm volatile("ldrb %0, [%1, %2]" + asm("ldrb %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; case 2: - asm volatile("ldrh %0, [%1, %2]" + asm("ldrh %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; case 4: - asm volatile("ldr %0, [%1, %2]" + asm("ldr %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; } @@ -103,17 +103,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where, if (addr) switch (size) { case 1: - asm volatile("strb %0, [%1, %2]" + asm("strb %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; case 2: - asm volatile("strh %0, [%1, %2]" + asm("strh %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; case 4: - asm volatile("str %0, [%1, %2]" + asm("str %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c index f2dcbe14cb67..fff529c5f9b3 100644 --- a/arch/arm/mach-imx/pm-imx6.c +++ b/arch/arm/mach-imx/pm-imx6.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -605,7 +604,6 @@ static void __init imx6_pm_common_init(const struct imx6_pm_socdata static void imx6_pm_stby_poweroff(void) { - gic_cpu_if_down(0); imx6_set_lpm(STOP_POWER_OFF); imx6q_suspend_finish(0); diff --git a/arch/arm/mach-imx/suspend-imx53.S b/arch/arm/mach-imx/suspend-imx53.S index f12d24104075..5ed078ad110a 100644 --- a/arch/arm/mach-imx/suspend-imx53.S +++ b/arch/arm/mach-imx/suspend-imx53.S 
@@ -33,11 +33,11 @@ * ^ * ^ * imx53_suspend code - * PM_INFO structure(imx5_cpu_suspend_info) + * PM_INFO structure(imx53_suspend_info) * ======================== low address ======================= */ -/* Offsets of members of struct imx5_cpu_suspend_info */ +/* Offsets of members of struct imx53_suspend_info */ #define SUSPEND_INFO_MX53_M4IF_V_OFFSET 0x0 #define SUSPEND_INFO_MX53_IOMUXC_V_OFFSET 0x4 #define SUSPEND_INFO_MX53_IO_COUNT_OFFSET 0x8 diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S index 99d2e296082c..7d84b617af48 100644 --- a/arch/arm/mach-imx/suspend-imx6.S +++ b/arch/arm/mach-imx/suspend-imx6.S @@ -73,7 +73,6 @@ #define MX6Q_CCM_CCR 0x0 .align 3 - .arm .macro sync_l2_cache diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c index 0f1f5c4141d5..c279293f084c 100644 --- a/arch/arm/mach-keystone/keystone.c +++ b/arch/arm/mach-keystone/keystone.c @@ -71,7 +71,7 @@ static phys_addr_t keystone_virt_to_idmap(unsigned long x) static long long __init keystone_pv_fixup(void) { long long offset; - u64 mem_start, mem_end; + phys_addr_t mem_start, mem_end; mem_start = memblock_start_of_DRAM(); mem_end = memblock_end_of_DRAM(); @@ -84,7 +84,7 @@ static long long __init keystone_pv_fixup(void) if (mem_start < KEYSTONE_HIGH_PHYS_START || mem_end > KEYSTONE_HIGH_PHYS_END) { pr_crit("Invalid address space for memory (%08llx-%08llx)\n", - mem_start, mem_end); + (u64)mem_start, (u64)mem_end); return 0; } diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c index 68af9d9566cb..b6443a4e0c78 100644 --- a/arch/arm/mach-omap2/board-n8x0.c +++ b/arch/arm/mach-omap2/board-n8x0.c @@ -328,7 +328,6 @@ static int n8x0_mmc_get_cover_state(struct device *dev, int slot) static void n8x0_mmc_callback(void *data, u8 card_mask) { -#ifdef CONFIG_MMC_OMAP int bit, *openp, index; if (board_is_n800()) { @@ -346,6 +345,7 @@ static void n8x0_mmc_callback(void *data, u8 card_mask) else *openp = 0; 
+#ifdef CONFIG_MMC_OMAP omap_mmc_notify_cover_event(mmc_device, index, *openp); #else pr_warn("MMC: notify cover event not available\n"); diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h index 1b41e23db98e..5bc6ea87cdf7 100644 --- a/arch/arm/mach-socfpga/core.h +++ b/arch/arm/mach-socfpga/core.h @@ -44,7 +44,7 @@ extern void __iomem *sdr_ctl_base_addr; u32 socfpga_sdram_self_refresh(u32 sdr_base); extern unsigned int socfpga_sdram_self_refresh_sz; -extern char secondary_trampoline[], secondary_trampoline_end[]; +extern char secondary_trampoline, secondary_trampoline_end; extern unsigned long socfpga_cpu1start_addr; diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c index ff1d13d3ef72..15c8ce8965f4 100644 --- a/arch/arm/mach-socfpga/platsmp.c +++ b/arch/arm/mach-socfpga/platsmp.c @@ -31,14 +31,14 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle) { - int trampoline_size = secondary_trampoline_end - secondary_trampoline; + int trampoline_size = &secondary_trampoline_end - &secondary_trampoline; if (socfpga_cpu1start_addr) { /* This will put CPU #1 into reset. 
*/ writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST); - memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size); + memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size); writel(virt_to_phys(secondary_startup), sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff)); @@ -56,12 +56,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle) static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle) { - int trampoline_size = secondary_trampoline_end - secondary_trampoline; + int trampoline_size = &secondary_trampoline_end - &secondary_trampoline; if (socfpga_cpu1start_addr) { writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr + SOCFPGA_A10_RSTMGR_MODMPURST); - memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size); + memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size); writel(virt_to_phys(secondary_startup), sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff)); diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index f46089b24588..71115afb71a0 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -724,7 +724,7 @@ config CPU_BIG_ENDIAN config CPU_ENDIAN_BE8 bool depends on CPU_BIG_ENDIAN - default CPU_V6 || CPU_V6K || CPU_V7 || CPU_V7M + default CPU_V6 || CPU_V6K || CPU_V7 help Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors. 
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c index bf24690ec83a..d130a5ece5d5 100644 --- a/arch/arm/mm/copypage-fa.c +++ b/arch/arm/mm/copypage-fa.c @@ -17,25 +17,26 @@ /* * Faraday optimised copy_user_page */ -static void fa_copy_user_page(void *kto, const void *kfrom) +static void __naked +fa_copy_user_page(void *kto, const void *kfrom) { - int tmp; - - asm volatile ("\ -1: ldmia %1!, {r3, r4, ip, lr} @ 4\n\ - stmia %0, {r3, r4, ip, lr} @ 4\n\ - mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\ - add %0, %0, #16 @ 1\n\ - ldmia %1!, {r3, r4, ip, lr} @ 4\n\ - stmia %0, {r3, r4, ip, lr} @ 4\n\ - mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\ - add %0, %0, #16 @ 1\n\ - subs %2, %2, #1 @ 1\n\ + asm("\ + stmfd sp!, {r4, lr} @ 2\n\ + mov r2, %0 @ 1\n\ +1: ldmia r1!, {r3, r4, ip, lr} @ 4\n\ + stmia r0, {r3, r4, ip, lr} @ 4\n\ + mcr p15, 0, r0, c7, c14, 1 @ 1 clean and invalidate D line\n\ + add r0, r0, #16 @ 1\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ + stmia r0, {r3, r4, ip, lr} @ 4\n\ + mcr p15, 0, r0, c7, c14, 1 @ 1 clean and invalidate D line\n\ + add r0, r0, #16 @ 1\n\ + subs r2, r2, #1 @ 1\n\ bne 1b @ 1\n\ - mcr p15, 0, %2, c7, c10, 4 @ 1 drain WB" - : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) - : "2" (PAGE_SIZE / 32) - : "r3", "r4", "ip", "lr"); + mcr p15, 0, r2, c7, c10, 4 @ 1 drain WB\n\ + ldmfd sp!, {r4, pc} @ 3" + : + : "I" (PAGE_SIZE / 32)); } void fa_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index cc819732d9b8..49ee0c1a7209 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -13,56 +13,58 @@ #include #include -static void feroceon_copy_user_page(void *kto, const void *kfrom) +static void __naked +feroceon_copy_user_page(void *kto, const void *kfrom) { - int tmp; - - asm volatile ("\ -1: ldmia %1!, {r2 - r7, ip, lr} \n\ - pld [%1, #0] \n\ - pld [%1, #32] \n\ - pld [%1, #64] \n\ - pld [%1, 
#96] \n\ - pld [%1, #128] \n\ - pld [%1, #160] \n\ - pld [%1, #192] \n\ - stmia %0, {r2 - r7, ip, lr} \n\ - ldmia %1!, {r2 - r7, ip, lr} \n\ - mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ - add %0, %0, #32 \n\ - stmia %0, {r2 - r7, ip, lr} \n\ - ldmia %1!, {r2 - r7, ip, lr} \n\ - mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ - add %0, %0, #32 \n\ - stmia %0, {r2 - r7, ip, lr} \n\ - ldmia %1!, {r2 - r7, ip, lr} \n\ - mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ - add %0, %0, #32 \n\ - stmia %0, {r2 - r7, ip, lr} \n\ - ldmia %1!, {r2 - r7, ip, lr} \n\ - mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ - add %0, %0, #32 \n\ - stmia %0, {r2 - r7, ip, lr} \n\ - ldmia %1!, {r2 - r7, ip, lr} \n\ - mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ - add %0, %0, #32 \n\ - stmia %0, {r2 - r7, ip, lr} \n\ - ldmia %1!, {r2 - r7, ip, lr} \n\ - mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ - add %0, %0, #32 \n\ - stmia %0, {r2 - r7, ip, lr} \n\ - ldmia %1!, {r2 - r7, ip, lr} \n\ - mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ - add %0, %0, #32 \n\ - stmia %0, {r2 - r7, ip, lr} \n\ - subs %2, %2, #(32 * 8) \n\ - mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ - add %0, %0, #32 \n\ + asm("\ + stmfd sp!, {r4-r9, lr} \n\ + mov ip, %2 \n\ +1: mov lr, r1 \n\ + ldmia r1!, {r2 - r9} \n\ + pld [lr, #32] \n\ + pld [lr, #64] \n\ + pld [lr, #96] \n\ + pld [lr, #128] \n\ + pld [lr, #160] \n\ + pld [lr, #192] \n\ + pld [lr, #224] \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ 
+ mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + subs ip, ip, #(32 * 8) \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ bne 1b \n\ - mcr p15, 0, %2, c7, c10, 4 @ drain WB" - : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) - : "2" (PAGE_SIZE) - : "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); + mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\ + ldmfd sp!, {r4-r9, pc}" + : + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE)); } void feroceon_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index db624170854a..1267e64133b9 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -40,11 +40,12 @@ static DEFINE_RAW_SPINLOCK(minicache_lock); * instruction. If your processor does not supply this, you have to write your * own copy_user_highpage that does the right thing. 
*/ -static void mc_copy_user_page(void *from, void *to) +static void __naked +mc_copy_user_page(void *from, void *to) { - int tmp; - - asm volatile ("\ + asm volatile( + "stmfd sp!, {r4, lr} @ 2\n\ + mov r4, %2 @ 1\n\ ldmia %0!, {r2, r3, ip, lr} @ 4\n\ 1: mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\ stmia %1!, {r2, r3, ip, lr} @ 4\n\ @@ -54,13 +55,13 @@ static void mc_copy_user_page(void *from, void *to) mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\ stmia %1!, {r2, r3, ip, lr} @ 4\n\ ldmia %0!, {r2, r3, ip, lr} @ 4\n\ - subs %2, %2, #1 @ 1\n\ + subs r4, r4, #1 @ 1\n\ stmia %1!, {r2, r3, ip, lr} @ 4\n\ ldmneia %0!, {r2, r3, ip, lr} @ 4\n\ - bne 1b @ " - : "+&r" (from), "+&r" (to), "=&r" (tmp) - : "2" (PAGE_SIZE / 64) - : "r2", "r3", "ip", "lr"); + bne 1b @ 1\n\ + ldmfd sp!, {r4, pc} @ 3" + : + : "r" (from), "r" (to), "I" (PAGE_SIZE / 64)); } void v4_mc_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index cd3e165afeed..067d0fdd630c 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -22,28 +22,29 @@ * instruction. If your processor does not supply this, you have to write your * own copy_user_highpage that does the right thing. 
*/ -static void v4wb_copy_user_page(void *kto, const void *kfrom) +static void __naked +v4wb_copy_user_page(void *kto, const void *kfrom) { - int tmp; - - asm volatile ("\ - ldmia %1!, {r3, r4, ip, lr} @ 4\n\ -1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ - stmia %0!, {r3, r4, ip, lr} @ 4\n\ - ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\ - stmia %0!, {r3, r4, ip, lr} @ 4\n\ - ldmia %1!, {r3, r4, ip, lr} @ 4\n\ - mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ - stmia %0!, {r3, r4, ip, lr} @ 4\n\ - ldmia %1!, {r3, r4, ip, lr} @ 4\n\ - subs %2, %2, #1 @ 1\n\ - stmia %0!, {r3, r4, ip, lr} @ 4\n\ - ldmneia %1!, {r3, r4, ip, lr} @ 4\n\ + asm("\ + stmfd sp!, {r4, lr} @ 2\n\ + mov r2, %2 @ 1\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ +1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ + mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ + subs r2, r2, #1 @ 1\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmneia r1!, {r3, r4, ip, lr} @ 4\n\ bne 1b @ 1\n\ - mcr p15, 0, %1, c7, c10, 4 @ 1 drain WB" - : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) - : "2" (PAGE_SIZE / 64) - : "r3", "r4", "ip", "lr"); + mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\ + ldmfd sp!, {r4, pc} @ 3" + : + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64)); } void v4wb_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index 8614572e1296..b85c5da2e510 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c @@ -20,26 +20,27 @@ * dirty data in the cache. However, we do have to ensure that * subsequent reads are up to date. 
*/ -static void v4wt_copy_user_page(void *kto, const void *kfrom) +static void __naked +v4wt_copy_user_page(void *kto, const void *kfrom) { - int tmp; - - asm volatile ("\ - ldmia %1!, {r3, r4, ip, lr} @ 4\n\ -1: stmia %0!, {r3, r4, ip, lr} @ 4\n\ - ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\ - stmia %0!, {r3, r4, ip, lr} @ 4\n\ - ldmia %1!, {r3, r4, ip, lr} @ 4\n\ - stmia %0!, {r3, r4, ip, lr} @ 4\n\ - ldmia %1!, {r3, r4, ip, lr} @ 4\n\ - subs %2, %2, #1 @ 1\n\ - stmia %0!, {r3, r4, ip, lr} @ 4\n\ - ldmneia %1!, {r3, r4, ip, lr} @ 4\n\ + asm("\ + stmfd sp!, {r4, lr} @ 2\n\ + mov r2, %2 @ 1\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ +1: stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ + subs r2, r2, #1 @ 1\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmneia r1!, {r3, r4, ip, lr} @ 4\n\ bne 1b @ 1\n\ - mcr p15, 0, %2, c7, c7, 0 @ flush ID cache" - : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) - : "2" (PAGE_SIZE / 64) - : "r3", "r4", "ip", "lr"); + mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ + ldmfd sp!, {r4, pc} @ 3" + : + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64)); } void v4wt_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index 55cbc3a89d85..03a2042aced5 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c @@ -21,46 +21,53 @@ /* * XSC3 optimised copy_user_highpage + * r0 = destination + * r1 = source * * The source page may have some clean entries in the cache already, but we * can safely ignore them - break_cow() will flush them out of the cache * if we eventually end up using our copied page. 
* */ -static void xsc3_mc_copy_user_page(void *kto, const void *kfrom) +static void __naked +xsc3_mc_copy_user_page(void *kto, const void *kfrom) { - int tmp; - - asm volatile ("\ - pld [%1, #0] \n\ - pld [%1, #32] \n\ -1: pld [%1, #64] \n\ - pld [%1, #96] \n\ + asm("\ + stmfd sp!, {r4, r5, lr} \n\ + mov lr, %2 \n\ \n\ -2: ldrd r2, [%1], #8 \n\ - ldrd r4, [%1], #8 \n\ - mcr p15, 0, %0, c7, c6, 1 @ invalidate\n\ - strd r2, [%0], #8 \n\ - ldrd r2, [%1], #8 \n\ - strd r4, [%0], #8 \n\ - ldrd r4, [%1], #8 \n\ - strd r2, [%0], #8 \n\ - strd r4, [%0], #8 \n\ - ldrd r2, [%1], #8 \n\ - ldrd r4, [%1], #8 \n\ - mcr p15, 0, %0, c7, c6, 1 @ invalidate\n\ - strd r2, [%0], #8 \n\ - ldrd r2, [%1], #8 \n\ - subs %2, %2, #1 \n\ - strd r4, [%0], #8 \n\ - ldrd r4, [%1], #8 \n\ - strd r2, [%0], #8 \n\ - strd r4, [%0], #8 \n\ + pld [r1, #0] \n\ + pld [r1, #32] \n\ +1: pld [r1, #64] \n\ + pld [r1, #96] \n\ + \n\ +2: ldrd r2, [r1], #8 \n\ + mov ip, r0 \n\ + ldrd r4, [r1], #8 \n\ + mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\ + strd r2, [r0], #8 \n\ + ldrd r2, [r1], #8 \n\ + strd r4, [r0], #8 \n\ + ldrd r4, [r1], #8 \n\ + strd r2, [r0], #8 \n\ + strd r4, [r0], #8 \n\ + ldrd r2, [r1], #8 \n\ + mov ip, r0 \n\ + ldrd r4, [r1], #8 \n\ + mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\ + strd r2, [r0], #8 \n\ + ldrd r2, [r1], #8 \n\ + subs lr, lr, #1 \n\ + strd r4, [r0], #8 \n\ + ldrd r4, [r1], #8 \n\ + strd r2, [r0], #8 \n\ + strd r4, [r0], #8 \n\ bgt 1b \n\ - beq 2b " - : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) - : "2" (PAGE_SIZE / 64 - 1) - : "r2", "r3", "r4", "r5"); + beq 2b \n\ + \n\ + ldmfd sp!, {r4, r5, pc}" + : + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1)); } void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, @@ -78,6 +85,8 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, /* * XScale optimised clear_user_page + * r0 = destination + * r1 = virtual user address of ultimate destination page */ void xsc3_mc_clear_user_highpage(struct page *page, 
unsigned long vaddr) { diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index c775d4b7adb0..0fb85025344d 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -36,51 +36,52 @@ static DEFINE_RAW_SPINLOCK(minicache_lock); * Dcache aliasing issue. The writes will be forwarded to the write buffer, * and merged as appropriate. */ -static void mc_copy_user_page(void *from, void *to) +static void __naked +mc_copy_user_page(void *from, void *to) { - int tmp; - /* * Strangely enough, best performance is achieved * when prefetching destination as well. (NP) */ - asm volatile ("\ - pld [%0, #0] \n\ - pld [%0, #32] \n\ - pld [%1, #0] \n\ - pld [%1, #32] \n\ -1: pld [%0, #64] \n\ - pld [%0, #96] \n\ - pld [%1, #64] \n\ - pld [%1, #96] \n\ -2: ldrd r2, [%0], #8 \n\ - ldrd r4, [%0], #8 \n\ - mov ip, %1 \n\ - strd r2, [%1], #8 \n\ - ldrd r2, [%0], #8 \n\ - strd r4, [%1], #8 \n\ - ldrd r4, [%0], #8 \n\ - strd r2, [%1], #8 \n\ - strd r4, [%1], #8 \n\ + asm volatile( + "stmfd sp!, {r4, r5, lr} \n\ + mov lr, %2 \n\ + pld [r0, #0] \n\ + pld [r0, #32] \n\ + pld [r1, #0] \n\ + pld [r1, #32] \n\ +1: pld [r0, #64] \n\ + pld [r0, #96] \n\ + pld [r1, #64] \n\ + pld [r1, #96] \n\ +2: ldrd r2, [r0], #8 \n\ + ldrd r4, [r0], #8 \n\ + mov ip, r1 \n\ + strd r2, [r1], #8 \n\ + ldrd r2, [r0], #8 \n\ + strd r4, [r1], #8 \n\ + ldrd r4, [r0], #8 \n\ + strd r2, [r1], #8 \n\ + strd r4, [r1], #8 \n\ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ - ldrd r2, [%0], #8 \n\ + ldrd r2, [r0], #8 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ - ldrd r4, [%0], #8 \n\ - mov ip, %1 \n\ - strd r2, [%1], #8 \n\ - ldrd r2, [%0], #8 \n\ - strd r4, [%1], #8 \n\ - ldrd r4, [%0], #8 \n\ - strd r2, [%1], #8 \n\ - strd r4, [%1], #8 \n\ + ldrd r4, [r0], #8 \n\ + mov ip, r1 \n\ + strd r2, [r1], #8 \n\ + ldrd r2, [r0], #8 \n\ + strd r4, [r1], #8 \n\ + ldrd r4, [r0], #8 \n\ + strd r2, [r1], #8 \n\ + strd r4, [r1], #8 \n\ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ - subs 
%2, %2, #1 \n\ + subs lr, lr, #1 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ bgt 1b \n\ - beq 2b " - : "+&r" (from), "+&r" (to), "=&r" (tmp) - : "2" (PAGE_SIZE / 64 - 1) - : "r2", "r3", "r4", "r5", "ip"); + beq 2b \n\ + ldmfd sp!, {r4, r5, pc} " + : + : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1)); } void xscale_mc_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index fd5cef1ec606..f353849d9388 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -228,14 +228,12 @@ early_param("ecc", early_ecc); static int __init early_cachepolicy(char *p) { pr_warn("cachepolicy kernel parameter not supported without cp15\n"); - return 0; } early_param("cachepolicy", early_cachepolicy); static int __init noalign_setup(char *__unused) { pr_warn("noalign kernel parameter not supported without cp15\n"); - return 1; } __setup("noalign", noalign_setup); diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index eb51f8a49e5a..8985959cbe64 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -329,7 +329,6 @@ ENTRY(\name\()_cache_fns) .macro define_tlb_functions name:req, flags_up:req, flags_smp .type \name\()_tlb_fns, #object - .align 2 ENTRY(\name\()_tlb_fns) .long \name\()_flush_user_tlb_range .long \name\()_flush_kern_tlb_range diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index bc7a5dbaf423..3eb018fa1a1f 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -270,7 +270,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs) switch (kcb->kprobe_status) { case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: - case KPROBE_HIT_SS: /* A pre- or post-handler probe got us here. 
*/ kprobes_inc_nmissed_count(p); save_previous_kprobe(kcb); @@ -279,11 +278,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs) singlestep(p, regs, kcb); restore_previous_kprobe(kcb); break; - case KPROBE_REENTER: - /* A nested probe was hit in FIQ, it is a BUG */ - pr_warn("Unrecoverable kprobe detected at %p.\n", - p->addr); - /* fall through */ default: /* impossible cases */ BUG(); @@ -666,7 +660,7 @@ static struct undef_hook kprobes_arm_break_hook = { #endif /* !CONFIG_THUMB2_KERNEL */ -int __init arch_init_kprobes(void) +int __init arch_init_kprobes() { arm_probes_decode_init(); #ifdef CONFIG_THUMB2_KERNEL diff --git a/arch/arm/probes/kprobes/test-thumb.c b/arch/arm/probes/kprobes/test-thumb.c index 4254391f3906..b683b4517458 100644 --- a/arch/arm/probes/kprobes/test-thumb.c +++ b/arch/arm/probes/kprobes/test-thumb.c @@ -444,21 +444,21 @@ void kprobe_thumb32_test_cases(void) "3: mvn r0, r0 \n\t" "2: nop \n\t") - TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,", lsl #1]", + TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,"]", "9: \n\t" ".short (2f-1b-4)>>1 \n\t" ".short (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") - TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,", lsl #1]", + TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,"]", "9: \n\t" ".short (2f-1b-4)>>1 \n\t" ".short (3f-1b-4)>>1 \n\t" "3: mvn r0, r0 \n\t" "2: nop \n\t") - TEST_RRX("tbh [r",1,9f, ", r",14,1,", lsl #1]", + TEST_RRX("tbh [r",1,9f, ", r",14,1,"]", "9: \n\t" ".short (2f-1b-4)>>1 \n\t" ".short (3f-1b-4)>>1 \n\t" @@ -471,10 +471,10 @@ void kprobe_thumb32_test_cases(void) TEST_UNSUPPORTED("strexb r0, r1, [r2]") TEST_UNSUPPORTED("strexh r0, r1, [r2]") - TEST_UNSUPPORTED("strexd r0, r1, r2, [r2]") + TEST_UNSUPPORTED("strexd r0, r1, [r2]") TEST_UNSUPPORTED("ldrexb r0, [r1]") TEST_UNSUPPORTED("ldrexh r0, [r1]") - TEST_UNSUPPORTED("ldrexd r0, r1, [r1]") + TEST_UNSUPPORTED("ldrexd r0, [r1]") TEST_GROUP("Data-processing (shifted register) and (modified immediate)") diff --git a/arch/arm/probes/uprobes/core.c 
b/arch/arm/probes/uprobes/core.c index b97230704b74..d1329f1ba4e4 100644 --- a/arch/arm/probes/uprobes/core.c +++ b/arch/arm/probes/uprobes/core.c @@ -207,7 +207,7 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) static struct undef_hook uprobes_arm_break_hook = { .instr_mask = 0x0fffffff, .instr_val = (UPROBE_SWBP_ARM_INSN & 0x0fffffff), - .cpsr_mask = (PSR_T_BIT | MODE_MASK), + .cpsr_mask = MODE_MASK, .cpsr_val = USR_MODE, .fn = uprobe_trap_handler, }; @@ -215,7 +215,7 @@ static struct undef_hook uprobes_arm_break_hook = { static struct undef_hook uprobes_arm_ss_hook = { .instr_mask = 0x0fffffff, .instr_val = (UPROBE_SS_ARM_INSN & 0x0fffffff), - .cpsr_mask = (PSR_T_BIT | MODE_MASK), + .cpsr_mask = MODE_MASK, .cpsr_val = USR_MODE, .fn = uprobe_trap_handler, }; diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c index b4ec8d1b0bef..0ed01f2d5ee4 100644 --- a/arch/arm/xen/p2m.c +++ b/arch/arm/xen/p2m.c @@ -91,39 +91,10 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, int i; for (i = 0; i < count; i++) { - struct gnttab_unmap_grant_ref unmap; - int rc; - if (map_ops[i].status) continue; - if (likely(set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, - map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) - continue; - - /* - * Signal an error for this slot. This in turn requires - * immediate unmapping. - */ - map_ops[i].status = GNTST_general_error; - unmap.host_addr = map_ops[i].host_addr, - unmap.handle = map_ops[i].handle; - map_ops[i].handle = ~0; - if (map_ops[i].flags & GNTMAP_device_map) - unmap.dev_bus_addr = map_ops[i].dev_bus_addr; - else - unmap.dev_bus_addr = 0; - - /* - * Pre-populate the status field, to be recognizable in - * the log message below. 
- */ - unmap.status = 1; - - rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, - &unmap, 1); - if (rc || unmap.status != GNTST_okay) - pr_err_once("gnttab unmap failed: rc=%d st=%d\n", - rc, unmap.status); + set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT, + map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT); } return 0; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 51916f6e6ab3..652b480b8979 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -482,7 +482,7 @@ config ARM64_ERRATUM_1024718 help This option adds work around for Arm Cortex-A55 Erratum 1024718. - Affected Cortex-A55 cores (all revisions) could cause incorrect + Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect update of the hardware dirty bit when the DBM/AP bits are updated without a break-before-make. The work around is to disable the usage of hardware DBM locally on the affected cores. CPUs not affected by diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 3ad887459f4d..953be757ef92 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -123,15 +123,6 @@ config MACH_SONY_POPLAR_DSDS If you enable this config, please use SONY Mobile device tree. -config MACH_SONY_POPLAR_KDDI - bool "Sony Mobile Poplar Kddi" - depends on ARCH_SONY_YOSHINO - help - Support for the SONY Mobile Poplar Kddi device - which is based on SONY Yoshino platform. - If you enable this config, - please use SONY Mobile device tree. 
- config MACH_SONY_LILAC bool "Sony Mobile Lilac" depends on ARCH_SONY_YOSHINO diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi index cb863891f29e..f9c5a549c2c0 100644 --- a/arch/arm64/boot/dts/exynos/exynos7.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi @@ -90,7 +90,7 @@ #address-cells = <0>; interrupt-controller; reg = <0x11001000 0x1000>, - <0x11002000 0x2000>, + <0x11002000 0x1000>, <0x11004000 0x2000>, <0x11006000 0x2000>; }; diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig index 729e80868871..3c27b6cb62a0 100644 --- a/arch/arm64/configs/cuttlefish_defconfig +++ b/arch/arm64/configs/cuttlefish_defconfig @@ -16,8 +16,6 @@ CONFIG_CGROUP_SCHEDTUNE=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_RT_GROUP_SCHED=y -CONFIG_BLK_CGROUP=y -CONFIG_NAMESPACES=y CONFIG_SCHED_AUTOGROUP=y CONFIG_SCHED_TUNE=y CONFIG_DEFAULT_USE_ENERGY_AWARE=y @@ -46,6 +44,8 @@ CONFIG_PCI_HOST_GENERIC=y CONFIG_PREEMPT=y CONFIG_HZ_100=y # CONFIG_SPARSEMEM_VMEMMAP is not set +CONFIG_KSM=y +CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_ZSMALLOC=y CONFIG_SECCOMP=y CONFIG_ARMV8_DEPRECATED=y @@ -54,6 +54,7 @@ CONFIG_CP15_BARRIER_EMULATION=y CONFIG_SETEND_EMULATION=y CONFIG_ARM64_SW_TTBR0_PAN=y CONFIG_RANDOMIZE_BASE=y +# CONFIG_EFI is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_COMPAT=y CONFIG_PM_WAKELOCKS=y @@ -187,7 +188,6 @@ CONFIG_DEBUG_DEVRES=y CONFIG_OF_UNITTEST=y CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_VIRTIO_BLK=y @@ -197,15 +197,12 @@ CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y CONFIG_SCSI_VIRTIO=y CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_LINEAR=y CONFIG_BLK_DEV_DM=y CONFIG_DM_CRYPT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y CONFIG_DM_VERITY_AVB=y -CONFIG_DM_ANDROID_VERITY=y CONFIG_NETDEVICES=y CONFIG_NETCONSOLE=y CONFIG_NETCONSOLE_DYNAMIC=y @@ -389,7 +386,6 @@ CONFIG_MSDOS_FS=y 
CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y -# CONFIG_EFIVAR_FS is not set CONFIG_SDCARD_FS=y CONFIG_PSTORE=y CONFIG_PSTORE_CONSOLE=y @@ -414,13 +410,7 @@ CONFIG_SECURITY_NETWORK=y CONFIG_LSM_MMAP_MIN_ADDR=65536 CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y -CONFIG_CRYPTO_RSA=y CONFIG_CRYPTO_SHA512=y CONFIG_CRYPTO_LZ4=y CONFIG_CRYPTO_ANSI_CPRNG=y -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_X509_CERTIFICATE_PARSER=y -CONFIG_SYSTEM_TRUSTED_KEYRING=y -CONFIG_SYSTEM_TRUSTED_KEYS="verity_dev_keys.x509" CONFIG_XZ_DEC=y diff --git a/arch/arm64/configs/diffconfig/poplar_kddi_diffconfig b/arch/arm64/configs/diffconfig/poplar_kddi_diffconfig deleted file mode 100644 index d11417fde1f6..000000000000 --- a/arch/arm64/configs/diffconfig/poplar_kddi_diffconfig +++ /dev/null @@ -1,8 +0,0 @@ -CONFIG_MACH_SONY_POPLAR_KDDI=y -CONFIG_MMTUNER_MN8855x=y -CONFIG_SENSORS_TCS3490=y -CONFIG_TOF_SENSOR=y -CONFIG_TOUCHSCREEN_CLEARPAD=y -# CONFIG_MMTUNER_DEBUG is not set -CONFIG_TOUCHSCREEN_CLEARPAD_I2C=y -CONFIG_TOUCHSCREEN_CLEARPAD_RMI_DEV=y diff --git a/arch/arm64/configs/lineage-msm8998-yoshino-lilac_defconfig b/arch/arm64/configs/lineage-msm8998-yoshino-lilac_defconfig index ed76cae7a6f4..955fa9bed852 100644 --- a/arch/arm64/configs/lineage-msm8998-yoshino-lilac_defconfig +++ b/arch/arm64/configs/lineage-msm8998-yoshino-lilac_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm64 4.4.266 Kernel Configuration +# Linux/arm64 4.4.245 Kernel Configuration # CONFIG_ARM64=y CONFIG_64BIT=y @@ -98,7 +98,7 @@ CONFIG_HIGH_RES_TIMERS=y # CONFIG_TICK_CPU_ACCOUNTING is not set # CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_SCHED_WALT=y +# CONFIG_SCHED_WALT is not set # CONFIG_BSD_PROCESS_ACCT is not set CONFIG_TASKSTATS=y # CONFIG_TASK_DELAY_ACCT is not set @@ -134,7 +134,7 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_GENERIC_SCHED_CLOCK=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set +CONFIG_CGROUP_DEBUG=y CONFIG_CGROUP_FREEZER=y # CONFIG_CGROUP_PIDS is not set # CONFIG_CGROUP_DEVICE is not set @@ -150,8 +150,12 @@ CONFIG_MEMCG_SWAP_ENABLED=y # CONFIG_CGROUP_PERF is not set CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set CONFIG_RT_GROUP_SCHED=y # CONFIG_BLK_CGROUP is not set +CONFIG_SCHED_HMP=y +CONFIG_SCHED_HMP_CSTATE_AWARE=y +CONFIG_SCHED_CORE_CTL=y # CONFIG_CHECKPOINT_RESTORE is not set CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set @@ -160,7 +164,7 @@ CONFIG_NAMESPACES=y CONFIG_NET_NS=y # CONFIG_SCHED_AUTOGROUP is not set CONFIG_SCHED_TUNE=y -CONFIG_DEFAULT_USE_ENERGY_AWARE=y +# CONFIG_DEFAULT_USE_ENERGY_AWARE is not set # CONFIG_SYSFS_DEPRECATED is not set # CONFIG_RELAY is not set CONFIG_BLK_DEV_INITRD=y @@ -223,8 +227,7 @@ CONFIG_SYSTEM_DATA_VERIFICATION=y CONFIG_PROFILING=y CONFIG_TRACEPOINTS=y # CONFIG_KPROBES is not set -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set +# CONFIG_JUMP_LABEL is not set # CONFIG_UPROBES is not set # CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y @@ -363,7 +366,6 @@ CONFIG_ARCH_SONY_YOSHINO=y # CONFIG_MACH_SONY_MAPLE_DSDS is not set # CONFIG_MACH_SONY_POPLAR is not set # CONFIG_MACH_SONY_POPLAR_DSDS is not set -# CONFIG_MACH_SONY_POPLAR_KDDI is not set CONFIG_MACH_SONY_LILAC=y CONFIG_ARCH_MSMHAMSTER=y # CONFIG_ARCH_SDM660 is not set @@ -494,6 +496,7 
@@ CONFIG_CMA_DEBUGFS=y CONFIG_CMA_AREAS=7 CONFIG_ZPOOL=y # CONFIG_ZBUD is not set +CONFIG_Z3FOLD=y CONFIG_ZSMALLOC=y # CONFIG_PGTABLE_MAPPING is not set # CONFIG_ZSMALLOC_STAT is not set @@ -549,7 +552,6 @@ CONFIG_CRASH_NOTES=y # CONFIG_BINFMT_ELF=y CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ELFCORE=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_SCRIPT=y # CONFIG_HAVE_AOUT is not set @@ -582,7 +584,7 @@ CONFIG_PM_OPP=y CONFIG_PM_CLK=y CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y CONFIG_CPU_PM=y -# CONFIG_WAKEUP_IRQ_DEBUG is not set +CONFIG_WAKEUP_IRQ_DEBUG=y CONFIG_ARCH_HIBERNATION_POSSIBLE=y CONFIG_ARCH_SUSPEND_POSSIBLE=y @@ -621,12 +623,12 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y # CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y -# CONFIG_CPU_FREQ_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -# CONFIG_CPU_BOOST is not set -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPU_BOOST=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set # # CPU frequency scaling drivers @@ -730,7 +732,7 @@ CONFIG_NETWORK_SECMARK=y CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set CONFIG_NETFILTER_ADVANCED=y -# CONFIG_BRIDGE_NETFILTER is not set +CONFIG_BRIDGE_NETFILTER=m # # Core Netfilter Configuration @@ -852,6 +854,7 @@ CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y # CONFIG_NETFILTER_XT_MATCH_OSF is not set # CONFIG_NETFILTER_XT_MATCH_OWNER is not set CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y CONFIG_NETFILTER_XT_MATCH_QTAGUID=y CONFIG_NETFILTER_XT_MATCH_QUOTA=y @@ -1235,8 +1238,7 @@ CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_NULL_BLK is not set # CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set CONFIG_ZRAM=y -# CONFIG_ZRAM_WRITEBACK is not set -# 
CONFIG_ZRAM_MEMORY_TRACKING is not set +CONFIG_ZRAM_LZ4_COMPRESS=y # CONFIG_BLK_CPQ_CISS_DA is not set # CONFIG_BLK_DEV_DAC960 is not set # CONFIG_BLK_DEV_UMEM is not set @@ -1299,11 +1301,11 @@ CONFIG_POWERKEY_FORCECRASH=y # CONFIG_CXD224X_NFC is not set # CONFIG_BD7602_POWER_IC is not set # CONFIG_ONESEG_TUNER_SMTVJ19X is not set -CONFIG_NFC_PN553_DEVICES=y -CONFIG_LDO_VIBRATOR=y +CONFIG_NFC_PN553_DEVICES=m +CONFIG_LDO_VIBRATOR=m CONFIG_TOF_SENSOR=y CONFIG_SENSORS_TCS3490=y -CONFIG_SIM_DETECT=y +CONFIG_SIM_DETECT=m # CONFIG_C2PORT is not set # @@ -1360,11 +1362,6 @@ CONFIG_MSM_ULTRASOUND=y # CONFIG_CXL_EEH is not set # CONFIG_MMTUNER_MN8855x is not set -# -# Sony Carillon NFC driver -# -# CONFIG_NFC_CARILLON is not set - # # SCSI device support # @@ -1948,7 +1945,7 @@ CONFIG_INPUT_MISC=y # CONFIG_INPUT_AD714X is not set # CONFIG_INPUT_BMA150 is not set # CONFIG_INPUT_E3X0_BUTTON is not set -CONFIG_INPUT_BU520X1NVX=y +CONFIG_INPUT_BU520X1NVX=m # CONFIG_INPUT_HBTP_INPUT is not set # CONFIG_INPUT_PM8941_PWRKEY is not set CONFIG_INPUT_QPNP_POWER_ON=y @@ -1979,7 +1976,7 @@ CONFIG_INPUT_UINPUT=y # CONFIG_INPUT_DRV2667_HAPTICS is not set # CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH is not set CONFIG_INPUT_STMVL53L0=y -CONFIG_FPC1145_PLATFORM=y +CONFIG_FPC1145_PLATFORM=m # CONFIG_INPUT_ADUX1050 is not set # CONFIG_BOSCH_DRIVER_LOG_FUNC is not set # CONFIG_SENSORS_BMA2X2 is not set @@ -2091,7 +2088,7 @@ CONFIG_HW_RANDOM_MSM_LEGACY=y # CONFIG_MSM_SMD_PKT is not set # CONFIG_XILLYBUS is not set CONFIG_MSM_ADSPRPC=y -# CONFIG_MSM_RDBG is not set +CONFIG_MSM_RDBG=m # # I2C support @@ -2210,7 +2207,6 @@ CONFIG_SPI_QUP=y CONFIG_SPI_SPIDEV=y # CONFIG_SPI_TLE62X0 is not set # CONFIG_SPI_SLAVE is not set -CONFIG_SPI_DYNAMIC=y CONFIG_SPMI=y CONFIG_SPMI_MSM_PMIC_ARB=y CONFIG_VIRTSPMI_MSM_PMIC_ARB=y @@ -2261,7 +2257,7 @@ CONFIG_PINCTRL_MSM=y # CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set CONFIG_PINCTRL_MSM8998=y # CONFIG_PINCTRL_MSM8996 is not set -# CONFIG_PINCTRL_SDM660 is not set 
+CONFIG_PINCTRL_SDM660=y CONFIG_PINCTRL_WCD=y # CONFIG_PINCTRL_LPI is not set CONFIG_PINCTRL_SOMC=y @@ -2761,7 +2757,56 @@ CONFIG_MEDIA_USB_SUPPORT=y # Webcam devices # # CONFIG_USB_VIDEO_CLASS is not set -# CONFIG_USB_GSPCA is not set +CONFIG_USB_GSPCA=m +# CONFIG_USB_M5602 is not set +# CONFIG_USB_STV06XX is not set +# CONFIG_USB_GL860 is not set +# CONFIG_USB_GSPCA_BENQ is not set +# CONFIG_USB_GSPCA_CONEX is not set +# CONFIG_USB_GSPCA_CPIA1 is not set +# CONFIG_USB_GSPCA_DTCS033 is not set +# CONFIG_USB_GSPCA_ETOMS is not set +# CONFIG_USB_GSPCA_FINEPIX is not set +# CONFIG_USB_GSPCA_JEILINJ is not set +# CONFIG_USB_GSPCA_JL2005BCD is not set +# CONFIG_USB_GSPCA_KINECT is not set +# CONFIG_USB_GSPCA_KONICA is not set +# CONFIG_USB_GSPCA_MARS is not set +# CONFIG_USB_GSPCA_MR97310A is not set +# CONFIG_USB_GSPCA_NW80X is not set +# CONFIG_USB_GSPCA_OV519 is not set +# CONFIG_USB_GSPCA_OV534 is not set +# CONFIG_USB_GSPCA_OV534_9 is not set +# CONFIG_USB_GSPCA_PAC207 is not set +# CONFIG_USB_GSPCA_PAC7302 is not set +# CONFIG_USB_GSPCA_PAC7311 is not set +# CONFIG_USB_GSPCA_SE401 is not set +# CONFIG_USB_GSPCA_SN9C2028 is not set +# CONFIG_USB_GSPCA_SN9C20X is not set +# CONFIG_USB_GSPCA_SONIXB is not set +# CONFIG_USB_GSPCA_SONIXJ is not set +# CONFIG_USB_GSPCA_SPCA500 is not set +# CONFIG_USB_GSPCA_SPCA501 is not set +# CONFIG_USB_GSPCA_SPCA505 is not set +# CONFIG_USB_GSPCA_SPCA506 is not set +# CONFIG_USB_GSPCA_SPCA508 is not set +# CONFIG_USB_GSPCA_SPCA561 is not set +# CONFIG_USB_GSPCA_SPCA1528 is not set +# CONFIG_USB_GSPCA_SQ905 is not set +# CONFIG_USB_GSPCA_SQ905C is not set +# CONFIG_USB_GSPCA_SQ930X is not set +# CONFIG_USB_GSPCA_STK014 is not set +# CONFIG_USB_GSPCA_STK1135 is not set +# CONFIG_USB_GSPCA_STV0680 is not set +# CONFIG_USB_GSPCA_SUNPLUS is not set +# CONFIG_USB_GSPCA_T613 is not set +# CONFIG_USB_GSPCA_TOPRO is not set +# CONFIG_USB_GSPCA_TOUPTEK is not set +# CONFIG_USB_GSPCA_TV8532 is not set +# CONFIG_USB_GSPCA_VC032X is not set +# 
CONFIG_USB_GSPCA_VICAM is not set +# CONFIG_USB_GSPCA_XIRLINK_CIT is not set +# CONFIG_USB_GSPCA_ZC3XX is not set # CONFIG_USB_PWC is not set # CONFIG_VIDEO_CPIA2 is not set # CONFIG_USB_ZR364XX is not set @@ -2801,9 +2846,9 @@ CONFIG_V4L_PLATFORM_DRIVERS=y # QTI MSM Camera And Video & AIS # CONFIG_MSM_CAMERA=y -# CONFIG_MSM_CAMERA_DEBUG is not set +CONFIG_MSM_CAMERA_DEBUG=y CONFIG_MSMB_CAMERA=y -# CONFIG_MSMB_CAMERA_DEBUG is not set +CONFIG_MSMB_CAMERA_DEBUG=y CONFIG_MSM_CAMERA_SENSOR=y CONFIG_MSM_CPP=y CONFIG_MSM_CCI=y @@ -2838,10 +2883,15 @@ CONFIG_MSM_VIDC_V4L2=y CONFIG_MSM_VIDC_VMEM=y CONFIG_MSM_VIDC_GOVERNORS=y CONFIG_MSM_SDE_ROTATOR=y -# CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG is not set +CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y # CONFIG_MSM_AIS is not set -# CONFIG_DVB_MPQ is not set -# CONFIG_TSPP is not set +CONFIG_DVB_MPQ=m +CONFIG_DVB_MPQ_DEMUX=m +CONFIG_DVB_MPQ_NUM_DMX_DEVICES=4 +CONFIG_DVB_MPQ_TSPP1=y +# CONFIG_DVB_MPQ_SW is not set +# CONFIG_DVB_MPQ_MEDIA_BOX_DEMUX is not set +CONFIG_TSPP=m # # Supported MMC/SDIO adapters @@ -3663,7 +3713,8 @@ CONFIG_MMC_PERF_PROFILING=y # CONFIG_MMC_EMBEDDED_SDIO is not set # CONFIG_MMC_PARANOID_SD_INIT is not set CONFIG_MMC_CLKGATE=y -# CONFIG_MMC_CMD_DEBUG is not set +CONFIG_MMC_CMD_DEBUG=y +CONFIG_MMC_CMD_QUEUE_SIZE=256 # # MMC/SD/SDIO Card Drivers @@ -4063,12 +4114,12 @@ CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS=y # CONFIG_FSL_MC_BUS is not set # CONFIG_WILC1000_DRIVER is not set # CONFIG_MOST is not set -CONFIG_SONY_FIPS_KSCL=y +CONFIG_SONY_FIPS_KSCL=m # # Qualcomm Atheros CLD WLAN module # -CONFIG_QCA_CLD_WLAN=y +CONFIG_QCA_CLD_WLAN=m # CONFIG_GOLDFISH is not set # CONFIG_CHROME_PLATFORMS is not set @@ -4091,8 +4142,8 @@ CONFIG_RMNET_IPA3=y CONFIG_MSM_MHI=y CONFIG_MSM_MHI_UCI=y # CONFIG_MSM_MHI_DEBUG is not set -# CONFIG_MSM_11AD is not set -# CONFIG_SEEMP_CORE is not set +CONFIG_MSM_11AD=m +CONFIG_SEEMP_CORE=y CONFIG_USB_BAM=y CONFIG_MSM_EXT_DISPLAY=y # CONFIG_SDIO_QCN is not set @@ -4173,9 +4224,9 @@ 
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y CONFIG_MSM_GLINK_SPI_XPRT=y CONFIG_MSM_SPCOM=y CONFIG_MSM_SPSS_UTILS=y -# CONFIG_MSM_SMEM_LOGGING is not set +CONFIG_MSM_SMEM_LOGGING=y CONFIG_MSM_SMP2P=y -# CONFIG_MSM_SMP2P_TEST is not set +CONFIG_MSM_SMP2P_TEST=y CONFIG_MSM_QMI_INTERFACE=y # CONFIG_MSM_L2_IA_DEBUG is not set CONFIG_MSM_RPM_SMD=y @@ -4226,7 +4277,8 @@ CONFIG_MSM_QDSP6_NOTIFIER=y CONFIG_MSM_ADSP_LOADER=y # CONFIG_MSM_CDSP_LOADER is not set # CONFIG_MSM_LPASS_RESOURCE_MANAGER is not set -# CONFIG_MSM_PERFORMANCE is not set +CONFIG_MSM_PERFORMANCE=y +# CONFIG_MSM_PERFORMANCE_HOTPLUG_ON is not set CONFIG_MSM_SUBSYSTEM_RESTART=y # CONFIG_MSM_SYSMON_COMM is not set CONFIG_MSM_PIL=y @@ -4574,7 +4626,7 @@ CONFIG_ARM_PSCI_FW=y # CONFIG_ISCSI_IBFT is not set CONFIG_QCOM_SCM_64=y CONFIG_HAVE_ARM_SMCCC=y -# CONFIG_MSM_TZ_LOG is not set +CONFIG_MSM_TZ_LOG=y # CONFIG_BIF is not set CONFIG_SENSORS_SSC=y # CONFIG_TEE is not set @@ -4991,7 +5043,7 @@ CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y CONFIG_STRICT_DEVMEM=y # CONFIG_PID_IN_CONTEXTIDR is not set # CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set -# CONFIG_DEBUG_SET_MODULE_RONX is not set +CONFIG_DEBUG_SET_MODULE_RONX=y CONFIG_DEBUG_RODATA=y CONFIG_DEBUG_ALIGN_RODATA=y # CONFIG_FORCE_PAGES is not set @@ -5178,9 +5230,9 @@ CONFIG_CRYPTO_TWOFISH_COMMON=y # CONFIG_CRYPTO_DEFLATE=y # CONFIG_CRYPTO_ZLIB is not set -CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_LZO is not set # CONFIG_CRYPTO_842 is not set -CONFIG_CRYPTO_LZ4=y +# CONFIG_CRYPTO_LZ4 is not set # CONFIG_CRYPTO_LZ4HC is not set # CONFIG_CRYPTO_ZSTD is not set diff --git a/arch/arm64/configs/lineage-msm8998-yoshino-maple_defconfig b/arch/arm64/configs/lineage-msm8998-yoshino-maple_defconfig index 233cd3e183e2..e05e24eeab66 100644 --- a/arch/arm64/configs/lineage-msm8998-yoshino-maple_defconfig +++ b/arch/arm64/configs/lineage-msm8998-yoshino-maple_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm64 4.4.266 Kernel Configuration +# Linux/arm64 4.4.245 Kernel Configuration # CONFIG_ARM64=y CONFIG_64BIT=y @@ -98,7 +98,7 @@ CONFIG_HIGH_RES_TIMERS=y # CONFIG_TICK_CPU_ACCOUNTING is not set # CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_SCHED_WALT=y +# CONFIG_SCHED_WALT is not set # CONFIG_BSD_PROCESS_ACCT is not set CONFIG_TASKSTATS=y # CONFIG_TASK_DELAY_ACCT is not set @@ -134,7 +134,7 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_GENERIC_SCHED_CLOCK=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set +CONFIG_CGROUP_DEBUG=y CONFIG_CGROUP_FREEZER=y # CONFIG_CGROUP_PIDS is not set # CONFIG_CGROUP_DEVICE is not set @@ -150,8 +150,12 @@ CONFIG_MEMCG_SWAP_ENABLED=y # CONFIG_CGROUP_PERF is not set CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set CONFIG_RT_GROUP_SCHED=y # CONFIG_BLK_CGROUP is not set +CONFIG_SCHED_HMP=y +CONFIG_SCHED_HMP_CSTATE_AWARE=y +CONFIG_SCHED_CORE_CTL=y # CONFIG_CHECKPOINT_RESTORE is not set CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set @@ -160,7 +164,7 @@ CONFIG_NAMESPACES=y CONFIG_NET_NS=y # CONFIG_SCHED_AUTOGROUP is not set CONFIG_SCHED_TUNE=y -CONFIG_DEFAULT_USE_ENERGY_AWARE=y +# CONFIG_DEFAULT_USE_ENERGY_AWARE is not set # CONFIG_SYSFS_DEPRECATED is not set # CONFIG_RELAY is not set CONFIG_BLK_DEV_INITRD=y @@ -223,8 +227,7 @@ CONFIG_SYSTEM_DATA_VERIFICATION=y CONFIG_PROFILING=y CONFIG_TRACEPOINTS=y # CONFIG_KPROBES is not set -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set +# CONFIG_JUMP_LABEL is not set # CONFIG_UPROBES is not set # CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y @@ -363,7 +366,6 @@ CONFIG_MACH_SONY_MAPLE=y # CONFIG_MACH_SONY_MAPLE_DSDS is not set # CONFIG_MACH_SONY_POPLAR is not set # CONFIG_MACH_SONY_POPLAR_DSDS is not set -# CONFIG_MACH_SONY_POPLAR_KDDI is not set # CONFIG_MACH_SONY_LILAC is not set CONFIG_ARCH_MSMHAMSTER=y # CONFIG_ARCH_SDM660 is not set @@ 
-494,6 +496,7 @@ CONFIG_CMA_DEBUGFS=y CONFIG_CMA_AREAS=7 CONFIG_ZPOOL=y # CONFIG_ZBUD is not set +CONFIG_Z3FOLD=y CONFIG_ZSMALLOC=y # CONFIG_PGTABLE_MAPPING is not set # CONFIG_ZSMALLOC_STAT is not set @@ -549,7 +552,6 @@ CONFIG_CRASH_NOTES=y # CONFIG_BINFMT_ELF=y CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ELFCORE=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_SCRIPT=y # CONFIG_HAVE_AOUT is not set @@ -582,7 +584,7 @@ CONFIG_PM_OPP=y CONFIG_PM_CLK=y CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y CONFIG_CPU_PM=y -# CONFIG_WAKEUP_IRQ_DEBUG is not set +CONFIG_WAKEUP_IRQ_DEBUG=y CONFIG_ARCH_HIBERNATION_POSSIBLE=y CONFIG_ARCH_SUSPEND_POSSIBLE=y @@ -621,12 +623,12 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y # CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y -# CONFIG_CPU_FREQ_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -# CONFIG_CPU_BOOST is not set -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPU_BOOST=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set # # CPU frequency scaling drivers @@ -730,7 +732,7 @@ CONFIG_NETWORK_SECMARK=y CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set CONFIG_NETFILTER_ADVANCED=y -# CONFIG_BRIDGE_NETFILTER is not set +CONFIG_BRIDGE_NETFILTER=m # # Core Netfilter Configuration @@ -852,6 +854,7 @@ CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y # CONFIG_NETFILTER_XT_MATCH_OSF is not set # CONFIG_NETFILTER_XT_MATCH_OWNER is not set CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y CONFIG_NETFILTER_XT_MATCH_QTAGUID=y CONFIG_NETFILTER_XT_MATCH_QUOTA=y @@ -1235,8 +1238,7 @@ CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_NULL_BLK is not set # CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set CONFIG_ZRAM=y -# CONFIG_ZRAM_WRITEBACK is not set -# 
CONFIG_ZRAM_MEMORY_TRACKING is not set +CONFIG_ZRAM_LZ4_COMPRESS=y # CONFIG_BLK_CPQ_CISS_DA is not set # CONFIG_BLK_DEV_DAC960 is not set # CONFIG_BLK_DEV_UMEM is not set @@ -1299,11 +1301,11 @@ CONFIG_POWERKEY_FORCECRASH=y # CONFIG_CXD224X_NFC is not set # CONFIG_BD7602_POWER_IC is not set # CONFIG_ONESEG_TUNER_SMTVJ19X is not set -CONFIG_NFC_PN553_DEVICES=y -CONFIG_LDO_VIBRATOR=y +CONFIG_NFC_PN553_DEVICES=m +CONFIG_LDO_VIBRATOR=m CONFIG_TOF_SENSOR=y CONFIG_SENSORS_TCS3490=y -CONFIG_SIM_DETECT=y +CONFIG_SIM_DETECT=m # CONFIG_C2PORT is not set # @@ -1360,11 +1362,6 @@ CONFIG_MSM_ULTRASOUND=y # CONFIG_CXL_EEH is not set # CONFIG_MMTUNER_MN8855x is not set -# -# Sony Carillon NFC driver -# -# CONFIG_NFC_CARILLON is not set - # # SCSI device support # @@ -1948,7 +1945,7 @@ CONFIG_INPUT_MISC=y # CONFIG_INPUT_AD714X is not set # CONFIG_INPUT_BMA150 is not set # CONFIG_INPUT_E3X0_BUTTON is not set -CONFIG_INPUT_BU520X1NVX=y +CONFIG_INPUT_BU520X1NVX=m # CONFIG_INPUT_HBTP_INPUT is not set # CONFIG_INPUT_PM8941_PWRKEY is not set CONFIG_INPUT_QPNP_POWER_ON=y @@ -1979,7 +1976,7 @@ CONFIG_INPUT_UINPUT=y # CONFIG_INPUT_DRV2667_HAPTICS is not set # CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH is not set CONFIG_INPUT_STMVL53L0=y -CONFIG_FPC1145_PLATFORM=y +CONFIG_FPC1145_PLATFORM=m # CONFIG_INPUT_ADUX1050 is not set # CONFIG_BOSCH_DRIVER_LOG_FUNC is not set # CONFIG_SENSORS_BMA2X2 is not set @@ -2091,7 +2088,7 @@ CONFIG_HW_RANDOM_MSM_LEGACY=y # CONFIG_MSM_SMD_PKT is not set # CONFIG_XILLYBUS is not set CONFIG_MSM_ADSPRPC=y -# CONFIG_MSM_RDBG is not set +CONFIG_MSM_RDBG=m # # I2C support @@ -2210,7 +2207,6 @@ CONFIG_SPI_QUP=y CONFIG_SPI_SPIDEV=y # CONFIG_SPI_TLE62X0 is not set # CONFIG_SPI_SLAVE is not set -CONFIG_SPI_DYNAMIC=y CONFIG_SPMI=y CONFIG_SPMI_MSM_PMIC_ARB=y CONFIG_VIRTSPMI_MSM_PMIC_ARB=y @@ -2261,7 +2257,7 @@ CONFIG_PINCTRL_MSM=y # CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set CONFIG_PINCTRL_MSM8998=y # CONFIG_PINCTRL_MSM8996 is not set -# CONFIG_PINCTRL_SDM660 is not set 
+CONFIG_PINCTRL_SDM660=y CONFIG_PINCTRL_WCD=y # CONFIG_PINCTRL_LPI is not set CONFIG_PINCTRL_SOMC=y @@ -2761,7 +2757,56 @@ CONFIG_MEDIA_USB_SUPPORT=y # Webcam devices # # CONFIG_USB_VIDEO_CLASS is not set -# CONFIG_USB_GSPCA is not set +CONFIG_USB_GSPCA=m +# CONFIG_USB_M5602 is not set +# CONFIG_USB_STV06XX is not set +# CONFIG_USB_GL860 is not set +# CONFIG_USB_GSPCA_BENQ is not set +# CONFIG_USB_GSPCA_CONEX is not set +# CONFIG_USB_GSPCA_CPIA1 is not set +# CONFIG_USB_GSPCA_DTCS033 is not set +# CONFIG_USB_GSPCA_ETOMS is not set +# CONFIG_USB_GSPCA_FINEPIX is not set +# CONFIG_USB_GSPCA_JEILINJ is not set +# CONFIG_USB_GSPCA_JL2005BCD is not set +# CONFIG_USB_GSPCA_KINECT is not set +# CONFIG_USB_GSPCA_KONICA is not set +# CONFIG_USB_GSPCA_MARS is not set +# CONFIG_USB_GSPCA_MR97310A is not set +# CONFIG_USB_GSPCA_NW80X is not set +# CONFIG_USB_GSPCA_OV519 is not set +# CONFIG_USB_GSPCA_OV534 is not set +# CONFIG_USB_GSPCA_OV534_9 is not set +# CONFIG_USB_GSPCA_PAC207 is not set +# CONFIG_USB_GSPCA_PAC7302 is not set +# CONFIG_USB_GSPCA_PAC7311 is not set +# CONFIG_USB_GSPCA_SE401 is not set +# CONFIG_USB_GSPCA_SN9C2028 is not set +# CONFIG_USB_GSPCA_SN9C20X is not set +# CONFIG_USB_GSPCA_SONIXB is not set +# CONFIG_USB_GSPCA_SONIXJ is not set +# CONFIG_USB_GSPCA_SPCA500 is not set +# CONFIG_USB_GSPCA_SPCA501 is not set +# CONFIG_USB_GSPCA_SPCA505 is not set +# CONFIG_USB_GSPCA_SPCA506 is not set +# CONFIG_USB_GSPCA_SPCA508 is not set +# CONFIG_USB_GSPCA_SPCA561 is not set +# CONFIG_USB_GSPCA_SPCA1528 is not set +# CONFIG_USB_GSPCA_SQ905 is not set +# CONFIG_USB_GSPCA_SQ905C is not set +# CONFIG_USB_GSPCA_SQ930X is not set +# CONFIG_USB_GSPCA_STK014 is not set +# CONFIG_USB_GSPCA_STK1135 is not set +# CONFIG_USB_GSPCA_STV0680 is not set +# CONFIG_USB_GSPCA_SUNPLUS is not set +# CONFIG_USB_GSPCA_T613 is not set +# CONFIG_USB_GSPCA_TOPRO is not set +# CONFIG_USB_GSPCA_TOUPTEK is not set +# CONFIG_USB_GSPCA_TV8532 is not set +# CONFIG_USB_GSPCA_VC032X is not set +# 
CONFIG_USB_GSPCA_VICAM is not set +# CONFIG_USB_GSPCA_XIRLINK_CIT is not set +# CONFIG_USB_GSPCA_ZC3XX is not set # CONFIG_USB_PWC is not set # CONFIG_VIDEO_CPIA2 is not set # CONFIG_USB_ZR364XX is not set @@ -2801,9 +2846,9 @@ CONFIG_V4L_PLATFORM_DRIVERS=y # QTI MSM Camera And Video & AIS # CONFIG_MSM_CAMERA=y -# CONFIG_MSM_CAMERA_DEBUG is not set +CONFIG_MSM_CAMERA_DEBUG=y CONFIG_MSMB_CAMERA=y -# CONFIG_MSMB_CAMERA_DEBUG is not set +CONFIG_MSMB_CAMERA_DEBUG=y CONFIG_MSM_CAMERA_SENSOR=y CONFIG_MSM_CPP=y CONFIG_MSM_CCI=y @@ -2837,10 +2882,15 @@ CONFIG_MSM_VIDC_V4L2=y CONFIG_MSM_VIDC_VMEM=y CONFIG_MSM_VIDC_GOVERNORS=y CONFIG_MSM_SDE_ROTATOR=y -# CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG is not set +CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y # CONFIG_MSM_AIS is not set -# CONFIG_DVB_MPQ is not set -# CONFIG_TSPP is not set +CONFIG_DVB_MPQ=m +CONFIG_DVB_MPQ_DEMUX=m +CONFIG_DVB_MPQ_NUM_DMX_DEVICES=4 +CONFIG_DVB_MPQ_TSPP1=y +# CONFIG_DVB_MPQ_SW is not set +# CONFIG_DVB_MPQ_MEDIA_BOX_DEMUX is not set +CONFIG_TSPP=m # # Supported MMC/SDIO adapters @@ -3662,7 +3712,8 @@ CONFIG_MMC_PERF_PROFILING=y # CONFIG_MMC_EMBEDDED_SDIO is not set # CONFIG_MMC_PARANOID_SD_INIT is not set CONFIG_MMC_CLKGATE=y -# CONFIG_MMC_CMD_DEBUG is not set +CONFIG_MMC_CMD_DEBUG=y +CONFIG_MMC_CMD_QUEUE_SIZE=256 # # MMC/SD/SDIO Card Drivers @@ -4062,12 +4113,12 @@ CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS=y # CONFIG_FSL_MC_BUS is not set # CONFIG_WILC1000_DRIVER is not set # CONFIG_MOST is not set -CONFIG_SONY_FIPS_KSCL=y +CONFIG_SONY_FIPS_KSCL=m # # Qualcomm Atheros CLD WLAN module # -CONFIG_QCA_CLD_WLAN=y +CONFIG_QCA_CLD_WLAN=m # CONFIG_GOLDFISH is not set # CONFIG_CHROME_PLATFORMS is not set @@ -4090,8 +4141,8 @@ CONFIG_RMNET_IPA3=y CONFIG_MSM_MHI=y CONFIG_MSM_MHI_UCI=y # CONFIG_MSM_MHI_DEBUG is not set -# CONFIG_MSM_11AD is not set -# CONFIG_SEEMP_CORE is not set +CONFIG_MSM_11AD=m +CONFIG_SEEMP_CORE=y CONFIG_USB_BAM=y CONFIG_MSM_EXT_DISPLAY=y # CONFIG_SDIO_QCN is not set @@ -4172,9 +4223,9 @@ 
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y CONFIG_MSM_GLINK_SPI_XPRT=y CONFIG_MSM_SPCOM=y CONFIG_MSM_SPSS_UTILS=y -# CONFIG_MSM_SMEM_LOGGING is not set +CONFIG_MSM_SMEM_LOGGING=y CONFIG_MSM_SMP2P=y -# CONFIG_MSM_SMP2P_TEST is not set +CONFIG_MSM_SMP2P_TEST=y CONFIG_MSM_QMI_INTERFACE=y # CONFIG_MSM_L2_IA_DEBUG is not set CONFIG_MSM_RPM_SMD=y @@ -4225,7 +4276,8 @@ CONFIG_MSM_QDSP6_NOTIFIER=y CONFIG_MSM_ADSP_LOADER=y # CONFIG_MSM_CDSP_LOADER is not set # CONFIG_MSM_LPASS_RESOURCE_MANAGER is not set -# CONFIG_MSM_PERFORMANCE is not set +CONFIG_MSM_PERFORMANCE=y +# CONFIG_MSM_PERFORMANCE_HOTPLUG_ON is not set CONFIG_MSM_SUBSYSTEM_RESTART=y # CONFIG_MSM_SYSMON_COMM is not set CONFIG_MSM_PIL=y @@ -4573,7 +4625,7 @@ CONFIG_ARM_PSCI_FW=y # CONFIG_ISCSI_IBFT is not set CONFIG_QCOM_SCM_64=y CONFIG_HAVE_ARM_SMCCC=y -# CONFIG_MSM_TZ_LOG is not set +CONFIG_MSM_TZ_LOG=y # CONFIG_BIF is not set CONFIG_SENSORS_SSC=y # CONFIG_TEE is not set @@ -4990,7 +5042,7 @@ CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y CONFIG_STRICT_DEVMEM=y # CONFIG_PID_IN_CONTEXTIDR is not set # CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set -# CONFIG_DEBUG_SET_MODULE_RONX is not set +CONFIG_DEBUG_SET_MODULE_RONX=y CONFIG_DEBUG_RODATA=y CONFIG_DEBUG_ALIGN_RODATA=y # CONFIG_FORCE_PAGES is not set @@ -5177,9 +5229,9 @@ CONFIG_CRYPTO_TWOFISH_COMMON=y # CONFIG_CRYPTO_DEFLATE=y # CONFIG_CRYPTO_ZLIB is not set -CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_LZO is not set # CONFIG_CRYPTO_842 is not set -CONFIG_CRYPTO_LZ4=y +# CONFIG_CRYPTO_LZ4 is not set # CONFIG_CRYPTO_LZ4HC is not set # CONFIG_CRYPTO_ZSTD is not set diff --git a/arch/arm64/configs/lineage-msm8998-yoshino-maple_dsds_defconfig b/arch/arm64/configs/lineage-msm8998-yoshino-maple_dsds_defconfig index c6074c3e7ea0..6a958e8a9361 100644 --- a/arch/arm64/configs/lineage-msm8998-yoshino-maple_dsds_defconfig +++ b/arch/arm64/configs/lineage-msm8998-yoshino-maple_dsds_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm64 4.4.266 Kernel Configuration +# Linux/arm64 4.4.245 Kernel Configuration # CONFIG_ARM64=y CONFIG_64BIT=y @@ -98,7 +98,7 @@ CONFIG_HIGH_RES_TIMERS=y # CONFIG_TICK_CPU_ACCOUNTING is not set # CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_SCHED_WALT=y +# CONFIG_SCHED_WALT is not set # CONFIG_BSD_PROCESS_ACCT is not set CONFIG_TASKSTATS=y # CONFIG_TASK_DELAY_ACCT is not set @@ -134,7 +134,7 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_GENERIC_SCHED_CLOCK=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set +CONFIG_CGROUP_DEBUG=y CONFIG_CGROUP_FREEZER=y # CONFIG_CGROUP_PIDS is not set # CONFIG_CGROUP_DEVICE is not set @@ -150,8 +150,12 @@ CONFIG_MEMCG_SWAP_ENABLED=y # CONFIG_CGROUP_PERF is not set CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set CONFIG_RT_GROUP_SCHED=y # CONFIG_BLK_CGROUP is not set +CONFIG_SCHED_HMP=y +CONFIG_SCHED_HMP_CSTATE_AWARE=y +CONFIG_SCHED_CORE_CTL=y # CONFIG_CHECKPOINT_RESTORE is not set CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set @@ -160,7 +164,7 @@ CONFIG_NAMESPACES=y CONFIG_NET_NS=y # CONFIG_SCHED_AUTOGROUP is not set CONFIG_SCHED_TUNE=y -CONFIG_DEFAULT_USE_ENERGY_AWARE=y +# CONFIG_DEFAULT_USE_ENERGY_AWARE is not set # CONFIG_SYSFS_DEPRECATED is not set # CONFIG_RELAY is not set CONFIG_BLK_DEV_INITRD=y @@ -223,8 +227,7 @@ CONFIG_SYSTEM_DATA_VERIFICATION=y CONFIG_PROFILING=y CONFIG_TRACEPOINTS=y # CONFIG_KPROBES is not set -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set +# CONFIG_JUMP_LABEL is not set # CONFIG_UPROBES is not set # CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y @@ -363,7 +366,6 @@ CONFIG_ARCH_SONY_YOSHINO=y CONFIG_MACH_SONY_MAPLE_DSDS=y # CONFIG_MACH_SONY_POPLAR is not set # CONFIG_MACH_SONY_POPLAR_DSDS is not set -# CONFIG_MACH_SONY_POPLAR_KDDI is not set # CONFIG_MACH_SONY_LILAC is not set CONFIG_ARCH_MSMHAMSTER=y # CONFIG_ARCH_SDM660 is not set @@ -494,6 +496,7 
@@ CONFIG_CMA_DEBUGFS=y CONFIG_CMA_AREAS=7 CONFIG_ZPOOL=y # CONFIG_ZBUD is not set +CONFIG_Z3FOLD=y CONFIG_ZSMALLOC=y # CONFIG_PGTABLE_MAPPING is not set # CONFIG_ZSMALLOC_STAT is not set @@ -549,7 +552,6 @@ CONFIG_CRASH_NOTES=y # CONFIG_BINFMT_ELF=y CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ELFCORE=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_SCRIPT=y # CONFIG_HAVE_AOUT is not set @@ -582,7 +584,7 @@ CONFIG_PM_OPP=y CONFIG_PM_CLK=y CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y CONFIG_CPU_PM=y -# CONFIG_WAKEUP_IRQ_DEBUG is not set +CONFIG_WAKEUP_IRQ_DEBUG=y CONFIG_ARCH_HIBERNATION_POSSIBLE=y CONFIG_ARCH_SUSPEND_POSSIBLE=y @@ -621,12 +623,12 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y # CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y -# CONFIG_CPU_FREQ_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -# CONFIG_CPU_BOOST is not set -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPU_BOOST=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set # # CPU frequency scaling drivers @@ -730,7 +732,7 @@ CONFIG_NETWORK_SECMARK=y CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set CONFIG_NETFILTER_ADVANCED=y -# CONFIG_BRIDGE_NETFILTER is not set +CONFIG_BRIDGE_NETFILTER=m # # Core Netfilter Configuration @@ -852,6 +854,7 @@ CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y # CONFIG_NETFILTER_XT_MATCH_OSF is not set # CONFIG_NETFILTER_XT_MATCH_OWNER is not set CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y CONFIG_NETFILTER_XT_MATCH_QTAGUID=y CONFIG_NETFILTER_XT_MATCH_QUOTA=y @@ -1235,8 +1238,7 @@ CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_NULL_BLK is not set # CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set CONFIG_ZRAM=y -# CONFIG_ZRAM_WRITEBACK is not set -# 
CONFIG_ZRAM_MEMORY_TRACKING is not set +CONFIG_ZRAM_LZ4_COMPRESS=y # CONFIG_BLK_CPQ_CISS_DA is not set # CONFIG_BLK_DEV_DAC960 is not set # CONFIG_BLK_DEV_UMEM is not set @@ -1299,11 +1301,11 @@ CONFIG_POWERKEY_FORCECRASH=y # CONFIG_CXD224X_NFC is not set # CONFIG_BD7602_POWER_IC is not set # CONFIG_ONESEG_TUNER_SMTVJ19X is not set -CONFIG_NFC_PN553_DEVICES=y -CONFIG_LDO_VIBRATOR=y +CONFIG_NFC_PN553_DEVICES=m +CONFIG_LDO_VIBRATOR=m CONFIG_TOF_SENSOR=y CONFIG_SENSORS_TCS3490=y -CONFIG_SIM_DETECT=y +CONFIG_SIM_DETECT=m # CONFIG_C2PORT is not set # @@ -1360,11 +1362,6 @@ CONFIG_MSM_ULTRASOUND=y # CONFIG_CXL_EEH is not set # CONFIG_MMTUNER_MN8855x is not set -# -# Sony Carillon NFC driver -# -# CONFIG_NFC_CARILLON is not set - # # SCSI device support # @@ -1948,7 +1945,7 @@ CONFIG_INPUT_MISC=y # CONFIG_INPUT_AD714X is not set # CONFIG_INPUT_BMA150 is not set # CONFIG_INPUT_E3X0_BUTTON is not set -CONFIG_INPUT_BU520X1NVX=y +CONFIG_INPUT_BU520X1NVX=m # CONFIG_INPUT_HBTP_INPUT is not set # CONFIG_INPUT_PM8941_PWRKEY is not set CONFIG_INPUT_QPNP_POWER_ON=y @@ -1979,7 +1976,7 @@ CONFIG_INPUT_UINPUT=y # CONFIG_INPUT_DRV2667_HAPTICS is not set # CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH is not set CONFIG_INPUT_STMVL53L0=y -CONFIG_FPC1145_PLATFORM=y +CONFIG_FPC1145_PLATFORM=m # CONFIG_INPUT_ADUX1050 is not set # CONFIG_BOSCH_DRIVER_LOG_FUNC is not set # CONFIG_SENSORS_BMA2X2 is not set @@ -2091,7 +2088,7 @@ CONFIG_HW_RANDOM_MSM_LEGACY=y # CONFIG_MSM_SMD_PKT is not set # CONFIG_XILLYBUS is not set CONFIG_MSM_ADSPRPC=y -# CONFIG_MSM_RDBG is not set +CONFIG_MSM_RDBG=m # # I2C support @@ -2210,7 +2207,6 @@ CONFIG_SPI_QUP=y CONFIG_SPI_SPIDEV=y # CONFIG_SPI_TLE62X0 is not set # CONFIG_SPI_SLAVE is not set -CONFIG_SPI_DYNAMIC=y CONFIG_SPMI=y CONFIG_SPMI_MSM_PMIC_ARB=y CONFIG_VIRTSPMI_MSM_PMIC_ARB=y @@ -2261,7 +2257,7 @@ CONFIG_PINCTRL_MSM=y # CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set CONFIG_PINCTRL_MSM8998=y # CONFIG_PINCTRL_MSM8996 is not set -# CONFIG_PINCTRL_SDM660 is not set 
+CONFIG_PINCTRL_SDM660=y CONFIG_PINCTRL_WCD=y # CONFIG_PINCTRL_LPI is not set CONFIG_PINCTRL_SOMC=y @@ -2761,7 +2757,56 @@ CONFIG_MEDIA_USB_SUPPORT=y # Webcam devices # # CONFIG_USB_VIDEO_CLASS is not set -# CONFIG_USB_GSPCA is not set +CONFIG_USB_GSPCA=m +# CONFIG_USB_M5602 is not set +# CONFIG_USB_STV06XX is not set +# CONFIG_USB_GL860 is not set +# CONFIG_USB_GSPCA_BENQ is not set +# CONFIG_USB_GSPCA_CONEX is not set +# CONFIG_USB_GSPCA_CPIA1 is not set +# CONFIG_USB_GSPCA_DTCS033 is not set +# CONFIG_USB_GSPCA_ETOMS is not set +# CONFIG_USB_GSPCA_FINEPIX is not set +# CONFIG_USB_GSPCA_JEILINJ is not set +# CONFIG_USB_GSPCA_JL2005BCD is not set +# CONFIG_USB_GSPCA_KINECT is not set +# CONFIG_USB_GSPCA_KONICA is not set +# CONFIG_USB_GSPCA_MARS is not set +# CONFIG_USB_GSPCA_MR97310A is not set +# CONFIG_USB_GSPCA_NW80X is not set +# CONFIG_USB_GSPCA_OV519 is not set +# CONFIG_USB_GSPCA_OV534 is not set +# CONFIG_USB_GSPCA_OV534_9 is not set +# CONFIG_USB_GSPCA_PAC207 is not set +# CONFIG_USB_GSPCA_PAC7302 is not set +# CONFIG_USB_GSPCA_PAC7311 is not set +# CONFIG_USB_GSPCA_SE401 is not set +# CONFIG_USB_GSPCA_SN9C2028 is not set +# CONFIG_USB_GSPCA_SN9C20X is not set +# CONFIG_USB_GSPCA_SONIXB is not set +# CONFIG_USB_GSPCA_SONIXJ is not set +# CONFIG_USB_GSPCA_SPCA500 is not set +# CONFIG_USB_GSPCA_SPCA501 is not set +# CONFIG_USB_GSPCA_SPCA505 is not set +# CONFIG_USB_GSPCA_SPCA506 is not set +# CONFIG_USB_GSPCA_SPCA508 is not set +# CONFIG_USB_GSPCA_SPCA561 is not set +# CONFIG_USB_GSPCA_SPCA1528 is not set +# CONFIG_USB_GSPCA_SQ905 is not set +# CONFIG_USB_GSPCA_SQ905C is not set +# CONFIG_USB_GSPCA_SQ930X is not set +# CONFIG_USB_GSPCA_STK014 is not set +# CONFIG_USB_GSPCA_STK1135 is not set +# CONFIG_USB_GSPCA_STV0680 is not set +# CONFIG_USB_GSPCA_SUNPLUS is not set +# CONFIG_USB_GSPCA_T613 is not set +# CONFIG_USB_GSPCA_TOPRO is not set +# CONFIG_USB_GSPCA_TOUPTEK is not set +# CONFIG_USB_GSPCA_TV8532 is not set +# CONFIG_USB_GSPCA_VC032X is not set +# 
CONFIG_USB_GSPCA_VICAM is not set +# CONFIG_USB_GSPCA_XIRLINK_CIT is not set +# CONFIG_USB_GSPCA_ZC3XX is not set # CONFIG_USB_PWC is not set # CONFIG_VIDEO_CPIA2 is not set # CONFIG_USB_ZR364XX is not set @@ -2801,9 +2846,9 @@ CONFIG_V4L_PLATFORM_DRIVERS=y # QTI MSM Camera And Video & AIS # CONFIG_MSM_CAMERA=y -# CONFIG_MSM_CAMERA_DEBUG is not set +CONFIG_MSM_CAMERA_DEBUG=y CONFIG_MSMB_CAMERA=y -# CONFIG_MSMB_CAMERA_DEBUG is not set +CONFIG_MSMB_CAMERA_DEBUG=y CONFIG_MSM_CAMERA_SENSOR=y CONFIG_MSM_CPP=y CONFIG_MSM_CCI=y @@ -2837,10 +2882,15 @@ CONFIG_MSM_VIDC_V4L2=y CONFIG_MSM_VIDC_VMEM=y CONFIG_MSM_VIDC_GOVERNORS=y CONFIG_MSM_SDE_ROTATOR=y -# CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG is not set +CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y # CONFIG_MSM_AIS is not set -# CONFIG_DVB_MPQ is not set -# CONFIG_TSPP is not set +CONFIG_DVB_MPQ=m +CONFIG_DVB_MPQ_DEMUX=m +CONFIG_DVB_MPQ_NUM_DMX_DEVICES=4 +CONFIG_DVB_MPQ_TSPP1=y +# CONFIG_DVB_MPQ_SW is not set +# CONFIG_DVB_MPQ_MEDIA_BOX_DEMUX is not set +CONFIG_TSPP=m # # Supported MMC/SDIO adapters @@ -3662,7 +3712,8 @@ CONFIG_MMC_PERF_PROFILING=y # CONFIG_MMC_EMBEDDED_SDIO is not set # CONFIG_MMC_PARANOID_SD_INIT is not set CONFIG_MMC_CLKGATE=y -# CONFIG_MMC_CMD_DEBUG is not set +CONFIG_MMC_CMD_DEBUG=y +CONFIG_MMC_CMD_QUEUE_SIZE=256 # # MMC/SD/SDIO Card Drivers @@ -4062,12 +4113,12 @@ CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS=y # CONFIG_FSL_MC_BUS is not set # CONFIG_WILC1000_DRIVER is not set # CONFIG_MOST is not set -CONFIG_SONY_FIPS_KSCL=y +CONFIG_SONY_FIPS_KSCL=m # # Qualcomm Atheros CLD WLAN module # -CONFIG_QCA_CLD_WLAN=y +CONFIG_QCA_CLD_WLAN=m # CONFIG_GOLDFISH is not set # CONFIG_CHROME_PLATFORMS is not set @@ -4090,8 +4141,8 @@ CONFIG_RMNET_IPA3=y CONFIG_MSM_MHI=y CONFIG_MSM_MHI_UCI=y # CONFIG_MSM_MHI_DEBUG is not set -# CONFIG_MSM_11AD is not set -# CONFIG_SEEMP_CORE is not set +CONFIG_MSM_11AD=m +CONFIG_SEEMP_CORE=y CONFIG_USB_BAM=y CONFIG_MSM_EXT_DISPLAY=y # CONFIG_SDIO_QCN is not set @@ -4172,9 +4223,9 @@ 
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y CONFIG_MSM_GLINK_SPI_XPRT=y CONFIG_MSM_SPCOM=y CONFIG_MSM_SPSS_UTILS=y -# CONFIG_MSM_SMEM_LOGGING is not set +CONFIG_MSM_SMEM_LOGGING=y CONFIG_MSM_SMP2P=y -# CONFIG_MSM_SMP2P_TEST is not set +CONFIG_MSM_SMP2P_TEST=y CONFIG_MSM_QMI_INTERFACE=y # CONFIG_MSM_L2_IA_DEBUG is not set CONFIG_MSM_RPM_SMD=y @@ -4225,7 +4276,8 @@ CONFIG_MSM_QDSP6_NOTIFIER=y CONFIG_MSM_ADSP_LOADER=y # CONFIG_MSM_CDSP_LOADER is not set # CONFIG_MSM_LPASS_RESOURCE_MANAGER is not set -# CONFIG_MSM_PERFORMANCE is not set +CONFIG_MSM_PERFORMANCE=y +# CONFIG_MSM_PERFORMANCE_HOTPLUG_ON is not set CONFIG_MSM_SUBSYSTEM_RESTART=y # CONFIG_MSM_SYSMON_COMM is not set CONFIG_MSM_PIL=y @@ -4573,7 +4625,7 @@ CONFIG_ARM_PSCI_FW=y # CONFIG_ISCSI_IBFT is not set CONFIG_QCOM_SCM_64=y CONFIG_HAVE_ARM_SMCCC=y -# CONFIG_MSM_TZ_LOG is not set +CONFIG_MSM_TZ_LOG=y # CONFIG_BIF is not set CONFIG_SENSORS_SSC=y # CONFIG_TEE is not set @@ -4990,7 +5042,7 @@ CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y CONFIG_STRICT_DEVMEM=y # CONFIG_PID_IN_CONTEXTIDR is not set # CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set -# CONFIG_DEBUG_SET_MODULE_RONX is not set +CONFIG_DEBUG_SET_MODULE_RONX=y CONFIG_DEBUG_RODATA=y CONFIG_DEBUG_ALIGN_RODATA=y # CONFIG_FORCE_PAGES is not set @@ -5177,9 +5229,9 @@ CONFIG_CRYPTO_TWOFISH_COMMON=y # CONFIG_CRYPTO_DEFLATE=y # CONFIG_CRYPTO_ZLIB is not set -CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_LZO is not set # CONFIG_CRYPTO_842 is not set -CONFIG_CRYPTO_LZ4=y +# CONFIG_CRYPTO_LZ4 is not set # CONFIG_CRYPTO_LZ4HC is not set # CONFIG_CRYPTO_ZSTD is not set diff --git a/arch/arm64/configs/lineage-msm8998-yoshino-poplar_defconfig b/arch/arm64/configs/lineage-msm8998-yoshino-poplar_defconfig index 58ecb4416d5c..02cd5eca70f8 100644 --- a/arch/arm64/configs/lineage-msm8998-yoshino-poplar_defconfig +++ b/arch/arm64/configs/lineage-msm8998-yoshino-poplar_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm64 4.4.266 Kernel Configuration +# Linux/arm64 4.4.245 Kernel Configuration # CONFIG_ARM64=y CONFIG_64BIT=y @@ -98,7 +98,7 @@ CONFIG_HIGH_RES_TIMERS=y # CONFIG_TICK_CPU_ACCOUNTING is not set # CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_SCHED_WALT=y +# CONFIG_SCHED_WALT is not set # CONFIG_BSD_PROCESS_ACCT is not set CONFIG_TASKSTATS=y # CONFIG_TASK_DELAY_ACCT is not set @@ -134,7 +134,7 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_GENERIC_SCHED_CLOCK=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set +CONFIG_CGROUP_DEBUG=y CONFIG_CGROUP_FREEZER=y # CONFIG_CGROUP_PIDS is not set # CONFIG_CGROUP_DEVICE is not set @@ -150,8 +150,12 @@ CONFIG_MEMCG_SWAP_ENABLED=y # CONFIG_CGROUP_PERF is not set CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set CONFIG_RT_GROUP_SCHED=y # CONFIG_BLK_CGROUP is not set +CONFIG_SCHED_HMP=y +CONFIG_SCHED_HMP_CSTATE_AWARE=y +CONFIG_SCHED_CORE_CTL=y # CONFIG_CHECKPOINT_RESTORE is not set CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set @@ -160,7 +164,7 @@ CONFIG_NAMESPACES=y CONFIG_NET_NS=y # CONFIG_SCHED_AUTOGROUP is not set CONFIG_SCHED_TUNE=y -CONFIG_DEFAULT_USE_ENERGY_AWARE=y +# CONFIG_DEFAULT_USE_ENERGY_AWARE is not set # CONFIG_SYSFS_DEPRECATED is not set # CONFIG_RELAY is not set CONFIG_BLK_DEV_INITRD=y @@ -223,8 +227,7 @@ CONFIG_SYSTEM_DATA_VERIFICATION=y CONFIG_PROFILING=y CONFIG_TRACEPOINTS=y # CONFIG_KPROBES is not set -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set +# CONFIG_JUMP_LABEL is not set # CONFIG_UPROBES is not set # CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y @@ -363,7 +366,6 @@ CONFIG_ARCH_SONY_YOSHINO=y # CONFIG_MACH_SONY_MAPLE_DSDS is not set CONFIG_MACH_SONY_POPLAR=y # CONFIG_MACH_SONY_POPLAR_DSDS is not set -# CONFIG_MACH_SONY_POPLAR_KDDI is not set # CONFIG_MACH_SONY_LILAC is not set CONFIG_ARCH_MSMHAMSTER=y # CONFIG_ARCH_SDM660 is not set @@ -494,6 +496,7 
@@ CONFIG_CMA_DEBUGFS=y CONFIG_CMA_AREAS=7 CONFIG_ZPOOL=y # CONFIG_ZBUD is not set +CONFIG_Z3FOLD=y CONFIG_ZSMALLOC=y # CONFIG_PGTABLE_MAPPING is not set # CONFIG_ZSMALLOC_STAT is not set @@ -549,7 +552,6 @@ CONFIG_CRASH_NOTES=y # CONFIG_BINFMT_ELF=y CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ELFCORE=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_SCRIPT=y # CONFIG_HAVE_AOUT is not set @@ -582,7 +584,7 @@ CONFIG_PM_OPP=y CONFIG_PM_CLK=y CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y CONFIG_CPU_PM=y -# CONFIG_WAKEUP_IRQ_DEBUG is not set +CONFIG_WAKEUP_IRQ_DEBUG=y CONFIG_ARCH_HIBERNATION_POSSIBLE=y CONFIG_ARCH_SUSPEND_POSSIBLE=y @@ -621,12 +623,12 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y # CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y -# CONFIG_CPU_FREQ_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -# CONFIG_CPU_BOOST is not set -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPU_BOOST=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set # # CPU frequency scaling drivers @@ -730,7 +732,7 @@ CONFIG_NETWORK_SECMARK=y CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set CONFIG_NETFILTER_ADVANCED=y -# CONFIG_BRIDGE_NETFILTER is not set +CONFIG_BRIDGE_NETFILTER=m # # Core Netfilter Configuration @@ -852,6 +854,7 @@ CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y # CONFIG_NETFILTER_XT_MATCH_OSF is not set # CONFIG_NETFILTER_XT_MATCH_OWNER is not set CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y CONFIG_NETFILTER_XT_MATCH_QTAGUID=y CONFIG_NETFILTER_XT_MATCH_QUOTA=y @@ -1235,8 +1238,7 @@ CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_NULL_BLK is not set # CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set CONFIG_ZRAM=y -# CONFIG_ZRAM_WRITEBACK is not set -# 
CONFIG_ZRAM_MEMORY_TRACKING is not set +CONFIG_ZRAM_LZ4_COMPRESS=y # CONFIG_BLK_CPQ_CISS_DA is not set # CONFIG_BLK_DEV_DAC960 is not set # CONFIG_BLK_DEV_UMEM is not set @@ -1299,11 +1301,11 @@ CONFIG_POWERKEY_FORCECRASH=y # CONFIG_CXD224X_NFC is not set # CONFIG_BD7602_POWER_IC is not set # CONFIG_ONESEG_TUNER_SMTVJ19X is not set -CONFIG_NFC_PN553_DEVICES=y -CONFIG_LDO_VIBRATOR=y +CONFIG_NFC_PN553_DEVICES=m +CONFIG_LDO_VIBRATOR=m CONFIG_TOF_SENSOR=y CONFIG_SENSORS_TCS3490=y -CONFIG_SIM_DETECT=y +CONFIG_SIM_DETECT=m # CONFIG_C2PORT is not set # @@ -1360,11 +1362,6 @@ CONFIG_MSM_ULTRASOUND=y # CONFIG_CXL_EEH is not set # CONFIG_MMTUNER_MN8855x is not set -# -# Sony Carillon NFC driver -# -# CONFIG_NFC_CARILLON is not set - # # SCSI device support # @@ -1948,7 +1945,7 @@ CONFIG_INPUT_MISC=y # CONFIG_INPUT_AD714X is not set # CONFIG_INPUT_BMA150 is not set # CONFIG_INPUT_E3X0_BUTTON is not set -CONFIG_INPUT_BU520X1NVX=y +CONFIG_INPUT_BU520X1NVX=m # CONFIG_INPUT_HBTP_INPUT is not set # CONFIG_INPUT_PM8941_PWRKEY is not set CONFIG_INPUT_QPNP_POWER_ON=y @@ -1979,7 +1976,7 @@ CONFIG_INPUT_UINPUT=y # CONFIG_INPUT_DRV2667_HAPTICS is not set # CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH is not set CONFIG_INPUT_STMVL53L0=y -CONFIG_FPC1145_PLATFORM=y +CONFIG_FPC1145_PLATFORM=m # CONFIG_INPUT_ADUX1050 is not set # CONFIG_BOSCH_DRIVER_LOG_FUNC is not set # CONFIG_SENSORS_BMA2X2 is not set @@ -2091,7 +2088,7 @@ CONFIG_HW_RANDOM_MSM_LEGACY=y # CONFIG_MSM_SMD_PKT is not set # CONFIG_XILLYBUS is not set CONFIG_MSM_ADSPRPC=y -# CONFIG_MSM_RDBG is not set +CONFIG_MSM_RDBG=m # # I2C support @@ -2210,7 +2207,6 @@ CONFIG_SPI_QUP=y CONFIG_SPI_SPIDEV=y # CONFIG_SPI_TLE62X0 is not set # CONFIG_SPI_SLAVE is not set -CONFIG_SPI_DYNAMIC=y CONFIG_SPMI=y CONFIG_SPMI_MSM_PMIC_ARB=y CONFIG_VIRTSPMI_MSM_PMIC_ARB=y @@ -2261,7 +2257,7 @@ CONFIG_PINCTRL_MSM=y # CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set CONFIG_PINCTRL_MSM8998=y # CONFIG_PINCTRL_MSM8996 is not set -# CONFIG_PINCTRL_SDM660 is not set 
+CONFIG_PINCTRL_SDM660=y CONFIG_PINCTRL_WCD=y # CONFIG_PINCTRL_LPI is not set CONFIG_PINCTRL_SOMC=y @@ -2761,7 +2757,56 @@ CONFIG_MEDIA_USB_SUPPORT=y # Webcam devices # # CONFIG_USB_VIDEO_CLASS is not set -# CONFIG_USB_GSPCA is not set +CONFIG_USB_GSPCA=m +# CONFIG_USB_M5602 is not set +# CONFIG_USB_STV06XX is not set +# CONFIG_USB_GL860 is not set +# CONFIG_USB_GSPCA_BENQ is not set +# CONFIG_USB_GSPCA_CONEX is not set +# CONFIG_USB_GSPCA_CPIA1 is not set +# CONFIG_USB_GSPCA_DTCS033 is not set +# CONFIG_USB_GSPCA_ETOMS is not set +# CONFIG_USB_GSPCA_FINEPIX is not set +# CONFIG_USB_GSPCA_JEILINJ is not set +# CONFIG_USB_GSPCA_JL2005BCD is not set +# CONFIG_USB_GSPCA_KINECT is not set +# CONFIG_USB_GSPCA_KONICA is not set +# CONFIG_USB_GSPCA_MARS is not set +# CONFIG_USB_GSPCA_MR97310A is not set +# CONFIG_USB_GSPCA_NW80X is not set +# CONFIG_USB_GSPCA_OV519 is not set +# CONFIG_USB_GSPCA_OV534 is not set +# CONFIG_USB_GSPCA_OV534_9 is not set +# CONFIG_USB_GSPCA_PAC207 is not set +# CONFIG_USB_GSPCA_PAC7302 is not set +# CONFIG_USB_GSPCA_PAC7311 is not set +# CONFIG_USB_GSPCA_SE401 is not set +# CONFIG_USB_GSPCA_SN9C2028 is not set +# CONFIG_USB_GSPCA_SN9C20X is not set +# CONFIG_USB_GSPCA_SONIXB is not set +# CONFIG_USB_GSPCA_SONIXJ is not set +# CONFIG_USB_GSPCA_SPCA500 is not set +# CONFIG_USB_GSPCA_SPCA501 is not set +# CONFIG_USB_GSPCA_SPCA505 is not set +# CONFIG_USB_GSPCA_SPCA506 is not set +# CONFIG_USB_GSPCA_SPCA508 is not set +# CONFIG_USB_GSPCA_SPCA561 is not set +# CONFIG_USB_GSPCA_SPCA1528 is not set +# CONFIG_USB_GSPCA_SQ905 is not set +# CONFIG_USB_GSPCA_SQ905C is not set +# CONFIG_USB_GSPCA_SQ930X is not set +# CONFIG_USB_GSPCA_STK014 is not set +# CONFIG_USB_GSPCA_STK1135 is not set +# CONFIG_USB_GSPCA_STV0680 is not set +# CONFIG_USB_GSPCA_SUNPLUS is not set +# CONFIG_USB_GSPCA_T613 is not set +# CONFIG_USB_GSPCA_TOPRO is not set +# CONFIG_USB_GSPCA_TOUPTEK is not set +# CONFIG_USB_GSPCA_TV8532 is not set +# CONFIG_USB_GSPCA_VC032X is not set +# 
CONFIG_USB_GSPCA_VICAM is not set +# CONFIG_USB_GSPCA_XIRLINK_CIT is not set +# CONFIG_USB_GSPCA_ZC3XX is not set # CONFIG_USB_PWC is not set # CONFIG_VIDEO_CPIA2 is not set # CONFIG_USB_ZR364XX is not set @@ -2801,9 +2846,9 @@ CONFIG_V4L_PLATFORM_DRIVERS=y # QTI MSM Camera And Video & AIS # CONFIG_MSM_CAMERA=y -# CONFIG_MSM_CAMERA_DEBUG is not set +CONFIG_MSM_CAMERA_DEBUG=y CONFIG_MSMB_CAMERA=y -# CONFIG_MSMB_CAMERA_DEBUG is not set +CONFIG_MSMB_CAMERA_DEBUG=y CONFIG_MSM_CAMERA_SENSOR=y CONFIG_MSM_CPP=y CONFIG_MSM_CCI=y @@ -2837,10 +2882,15 @@ CONFIG_MSM_VIDC_V4L2=y CONFIG_MSM_VIDC_VMEM=y CONFIG_MSM_VIDC_GOVERNORS=y CONFIG_MSM_SDE_ROTATOR=y -# CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG is not set +CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y # CONFIG_MSM_AIS is not set -# CONFIG_DVB_MPQ is not set -# CONFIG_TSPP is not set +CONFIG_DVB_MPQ=m +CONFIG_DVB_MPQ_DEMUX=m +CONFIG_DVB_MPQ_NUM_DMX_DEVICES=4 +CONFIG_DVB_MPQ_TSPP1=y +# CONFIG_DVB_MPQ_SW is not set +# CONFIG_DVB_MPQ_MEDIA_BOX_DEMUX is not set +CONFIG_TSPP=m # # Supported MMC/SDIO adapters @@ -3662,7 +3712,8 @@ CONFIG_MMC_PERF_PROFILING=y # CONFIG_MMC_EMBEDDED_SDIO is not set # CONFIG_MMC_PARANOID_SD_INIT is not set CONFIG_MMC_CLKGATE=y -# CONFIG_MMC_CMD_DEBUG is not set +CONFIG_MMC_CMD_DEBUG=y +CONFIG_MMC_CMD_QUEUE_SIZE=256 # # MMC/SD/SDIO Card Drivers @@ -4062,12 +4113,12 @@ CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS=y # CONFIG_FSL_MC_BUS is not set # CONFIG_WILC1000_DRIVER is not set # CONFIG_MOST is not set -CONFIG_SONY_FIPS_KSCL=y +CONFIG_SONY_FIPS_KSCL=m # # Qualcomm Atheros CLD WLAN module # -CONFIG_QCA_CLD_WLAN=y +CONFIG_QCA_CLD_WLAN=m # CONFIG_GOLDFISH is not set # CONFIG_CHROME_PLATFORMS is not set @@ -4090,8 +4141,8 @@ CONFIG_RMNET_IPA3=y CONFIG_MSM_MHI=y CONFIG_MSM_MHI_UCI=y # CONFIG_MSM_MHI_DEBUG is not set -# CONFIG_MSM_11AD is not set -# CONFIG_SEEMP_CORE is not set +CONFIG_MSM_11AD=m +CONFIG_SEEMP_CORE=y CONFIG_USB_BAM=y CONFIG_MSM_EXT_DISPLAY=y # CONFIG_SDIO_QCN is not set @@ -4172,9 +4223,9 @@ 
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y CONFIG_MSM_GLINK_SPI_XPRT=y CONFIG_MSM_SPCOM=y CONFIG_MSM_SPSS_UTILS=y -# CONFIG_MSM_SMEM_LOGGING is not set +CONFIG_MSM_SMEM_LOGGING=y CONFIG_MSM_SMP2P=y -# CONFIG_MSM_SMP2P_TEST is not set +CONFIG_MSM_SMP2P_TEST=y CONFIG_MSM_QMI_INTERFACE=y # CONFIG_MSM_L2_IA_DEBUG is not set CONFIG_MSM_RPM_SMD=y @@ -4225,7 +4276,8 @@ CONFIG_MSM_QDSP6_NOTIFIER=y CONFIG_MSM_ADSP_LOADER=y # CONFIG_MSM_CDSP_LOADER is not set # CONFIG_MSM_LPASS_RESOURCE_MANAGER is not set -# CONFIG_MSM_PERFORMANCE is not set +CONFIG_MSM_PERFORMANCE=y +# CONFIG_MSM_PERFORMANCE_HOTPLUG_ON is not set CONFIG_MSM_SUBSYSTEM_RESTART=y # CONFIG_MSM_SYSMON_COMM is not set CONFIG_MSM_PIL=y @@ -4573,7 +4625,7 @@ CONFIG_ARM_PSCI_FW=y # CONFIG_ISCSI_IBFT is not set CONFIG_QCOM_SCM_64=y CONFIG_HAVE_ARM_SMCCC=y -# CONFIG_MSM_TZ_LOG is not set +CONFIG_MSM_TZ_LOG=y # CONFIG_BIF is not set CONFIG_SENSORS_SSC=y # CONFIG_TEE is not set @@ -4990,7 +5042,7 @@ CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y CONFIG_STRICT_DEVMEM=y # CONFIG_PID_IN_CONTEXTIDR is not set # CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set -# CONFIG_DEBUG_SET_MODULE_RONX is not set +CONFIG_DEBUG_SET_MODULE_RONX=y CONFIG_DEBUG_RODATA=y CONFIG_DEBUG_ALIGN_RODATA=y # CONFIG_FORCE_PAGES is not set @@ -5177,9 +5229,9 @@ CONFIG_CRYPTO_TWOFISH_COMMON=y # CONFIG_CRYPTO_DEFLATE=y # CONFIG_CRYPTO_ZLIB is not set -CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_LZO is not set # CONFIG_CRYPTO_842 is not set -CONFIG_CRYPTO_LZ4=y +# CONFIG_CRYPTO_LZ4 is not set # CONFIG_CRYPTO_LZ4HC is not set # CONFIG_CRYPTO_ZSTD is not set diff --git a/arch/arm64/configs/lineage-msm8998-yoshino-poplar_dsds_defconfig b/arch/arm64/configs/lineage-msm8998-yoshino-poplar_dsds_defconfig index 4994938a8a11..48e1e865be8f 100644 --- a/arch/arm64/configs/lineage-msm8998-yoshino-poplar_dsds_defconfig +++ b/arch/arm64/configs/lineage-msm8998-yoshino-poplar_dsds_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm64 4.4.266 Kernel Configuration +# Linux/arm64 4.4.245 Kernel Configuration # CONFIG_ARM64=y CONFIG_64BIT=y @@ -98,7 +98,7 @@ CONFIG_HIGH_RES_TIMERS=y # CONFIG_TICK_CPU_ACCOUNTING is not set # CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_SCHED_WALT=y +# CONFIG_SCHED_WALT is not set # CONFIG_BSD_PROCESS_ACCT is not set CONFIG_TASKSTATS=y # CONFIG_TASK_DELAY_ACCT is not set @@ -134,7 +134,7 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_GENERIC_SCHED_CLOCK=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set +CONFIG_CGROUP_DEBUG=y CONFIG_CGROUP_FREEZER=y # CONFIG_CGROUP_PIDS is not set # CONFIG_CGROUP_DEVICE is not set @@ -150,8 +150,12 @@ CONFIG_MEMCG_SWAP_ENABLED=y # CONFIG_CGROUP_PERF is not set CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set CONFIG_RT_GROUP_SCHED=y # CONFIG_BLK_CGROUP is not set +CONFIG_SCHED_HMP=y +CONFIG_SCHED_HMP_CSTATE_AWARE=y +CONFIG_SCHED_CORE_CTL=y # CONFIG_CHECKPOINT_RESTORE is not set CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set @@ -160,7 +164,7 @@ CONFIG_NAMESPACES=y CONFIG_NET_NS=y # CONFIG_SCHED_AUTOGROUP is not set CONFIG_SCHED_TUNE=y -CONFIG_DEFAULT_USE_ENERGY_AWARE=y +# CONFIG_DEFAULT_USE_ENERGY_AWARE is not set # CONFIG_SYSFS_DEPRECATED is not set # CONFIG_RELAY is not set CONFIG_BLK_DEV_INITRD=y @@ -223,8 +227,7 @@ CONFIG_SYSTEM_DATA_VERIFICATION=y CONFIG_PROFILING=y CONFIG_TRACEPOINTS=y # CONFIG_KPROBES is not set -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set +# CONFIG_JUMP_LABEL is not set # CONFIG_UPROBES is not set # CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y @@ -363,7 +366,6 @@ CONFIG_ARCH_SONY_YOSHINO=y # CONFIG_MACH_SONY_MAPLE_DSDS is not set # CONFIG_MACH_SONY_POPLAR is not set CONFIG_MACH_SONY_POPLAR_DSDS=y -# CONFIG_MACH_SONY_POPLAR_KDDI is not set # CONFIG_MACH_SONY_LILAC is not set CONFIG_ARCH_MSMHAMSTER=y # CONFIG_ARCH_SDM660 is not set @@ -494,6 +496,7 
@@ CONFIG_CMA_DEBUGFS=y CONFIG_CMA_AREAS=7 CONFIG_ZPOOL=y # CONFIG_ZBUD is not set +CONFIG_Z3FOLD=y CONFIG_ZSMALLOC=y # CONFIG_PGTABLE_MAPPING is not set # CONFIG_ZSMALLOC_STAT is not set @@ -549,7 +552,6 @@ CONFIG_CRASH_NOTES=y # CONFIG_BINFMT_ELF=y CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ELFCORE=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_SCRIPT=y # CONFIG_HAVE_AOUT is not set @@ -582,7 +584,7 @@ CONFIG_PM_OPP=y CONFIG_PM_CLK=y CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y CONFIG_CPU_PM=y -# CONFIG_WAKEUP_IRQ_DEBUG is not set +CONFIG_WAKEUP_IRQ_DEBUG=y CONFIG_ARCH_HIBERNATION_POSSIBLE=y CONFIG_ARCH_SUSPEND_POSSIBLE=y @@ -621,12 +623,12 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y # CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y -# CONFIG_CPU_FREQ_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_INTERACTIVE=y CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -# CONFIG_CPU_BOOST is not set -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPU_BOOST=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set # # CPU frequency scaling drivers @@ -730,7 +732,7 @@ CONFIG_NETWORK_SECMARK=y CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set CONFIG_NETFILTER_ADVANCED=y -# CONFIG_BRIDGE_NETFILTER is not set +CONFIG_BRIDGE_NETFILTER=m # # Core Netfilter Configuration @@ -852,6 +854,7 @@ CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y # CONFIG_NETFILTER_XT_MATCH_OSF is not set # CONFIG_NETFILTER_XT_MATCH_OWNER is not set CONFIG_NETFILTER_XT_MATCH_POLICY=y +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y CONFIG_NETFILTER_XT_MATCH_QTAGUID=y CONFIG_NETFILTER_XT_MATCH_QUOTA=y @@ -1235,8 +1238,7 @@ CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_NULL_BLK is not set # CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set CONFIG_ZRAM=y -# CONFIG_ZRAM_WRITEBACK is not set -# 
CONFIG_ZRAM_MEMORY_TRACKING is not set +CONFIG_ZRAM_LZ4_COMPRESS=y # CONFIG_BLK_CPQ_CISS_DA is not set # CONFIG_BLK_DEV_DAC960 is not set # CONFIG_BLK_DEV_UMEM is not set @@ -1299,11 +1301,11 @@ CONFIG_POWERKEY_FORCECRASH=y # CONFIG_CXD224X_NFC is not set # CONFIG_BD7602_POWER_IC is not set # CONFIG_ONESEG_TUNER_SMTVJ19X is not set -CONFIG_NFC_PN553_DEVICES=y -CONFIG_LDO_VIBRATOR=y +CONFIG_NFC_PN553_DEVICES=m +CONFIG_LDO_VIBRATOR=m CONFIG_TOF_SENSOR=y CONFIG_SENSORS_TCS3490=y -CONFIG_SIM_DETECT=y +CONFIG_SIM_DETECT=m # CONFIG_C2PORT is not set # @@ -1360,11 +1362,6 @@ CONFIG_MSM_ULTRASOUND=y # CONFIG_CXL_EEH is not set # CONFIG_MMTUNER_MN8855x is not set -# -# Sony Carillon NFC driver -# -# CONFIG_NFC_CARILLON is not set - # # SCSI device support # @@ -1948,7 +1945,7 @@ CONFIG_INPUT_MISC=y # CONFIG_INPUT_AD714X is not set # CONFIG_INPUT_BMA150 is not set # CONFIG_INPUT_E3X0_BUTTON is not set -CONFIG_INPUT_BU520X1NVX=y +CONFIG_INPUT_BU520X1NVX=m # CONFIG_INPUT_HBTP_INPUT is not set # CONFIG_INPUT_PM8941_PWRKEY is not set CONFIG_INPUT_QPNP_POWER_ON=y @@ -1979,7 +1976,7 @@ CONFIG_INPUT_UINPUT=y # CONFIG_INPUT_DRV2667_HAPTICS is not set # CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH is not set CONFIG_INPUT_STMVL53L0=y -CONFIG_FPC1145_PLATFORM=y +CONFIG_FPC1145_PLATFORM=m # CONFIG_INPUT_ADUX1050 is not set # CONFIG_BOSCH_DRIVER_LOG_FUNC is not set # CONFIG_SENSORS_BMA2X2 is not set @@ -2091,7 +2088,7 @@ CONFIG_HW_RANDOM_MSM_LEGACY=y # CONFIG_MSM_SMD_PKT is not set # CONFIG_XILLYBUS is not set CONFIG_MSM_ADSPRPC=y -# CONFIG_MSM_RDBG is not set +CONFIG_MSM_RDBG=m # # I2C support @@ -2210,7 +2207,6 @@ CONFIG_SPI_QUP=y CONFIG_SPI_SPIDEV=y # CONFIG_SPI_TLE62X0 is not set # CONFIG_SPI_SLAVE is not set -CONFIG_SPI_DYNAMIC=y CONFIG_SPMI=y CONFIG_SPMI_MSM_PMIC_ARB=y CONFIG_VIRTSPMI_MSM_PMIC_ARB=y @@ -2261,7 +2257,7 @@ CONFIG_PINCTRL_MSM=y # CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set CONFIG_PINCTRL_MSM8998=y # CONFIG_PINCTRL_MSM8996 is not set -# CONFIG_PINCTRL_SDM660 is not set 
+CONFIG_PINCTRL_SDM660=y CONFIG_PINCTRL_WCD=y # CONFIG_PINCTRL_LPI is not set CONFIG_PINCTRL_SOMC=y @@ -2761,7 +2757,56 @@ CONFIG_MEDIA_USB_SUPPORT=y # Webcam devices # # CONFIG_USB_VIDEO_CLASS is not set -# CONFIG_USB_GSPCA is not set +CONFIG_USB_GSPCA=m +# CONFIG_USB_M5602 is not set +# CONFIG_USB_STV06XX is not set +# CONFIG_USB_GL860 is not set +# CONFIG_USB_GSPCA_BENQ is not set +# CONFIG_USB_GSPCA_CONEX is not set +# CONFIG_USB_GSPCA_CPIA1 is not set +# CONFIG_USB_GSPCA_DTCS033 is not set +# CONFIG_USB_GSPCA_ETOMS is not set +# CONFIG_USB_GSPCA_FINEPIX is not set +# CONFIG_USB_GSPCA_JEILINJ is not set +# CONFIG_USB_GSPCA_JL2005BCD is not set +# CONFIG_USB_GSPCA_KINECT is not set +# CONFIG_USB_GSPCA_KONICA is not set +# CONFIG_USB_GSPCA_MARS is not set +# CONFIG_USB_GSPCA_MR97310A is not set +# CONFIG_USB_GSPCA_NW80X is not set +# CONFIG_USB_GSPCA_OV519 is not set +# CONFIG_USB_GSPCA_OV534 is not set +# CONFIG_USB_GSPCA_OV534_9 is not set +# CONFIG_USB_GSPCA_PAC207 is not set +# CONFIG_USB_GSPCA_PAC7302 is not set +# CONFIG_USB_GSPCA_PAC7311 is not set +# CONFIG_USB_GSPCA_SE401 is not set +# CONFIG_USB_GSPCA_SN9C2028 is not set +# CONFIG_USB_GSPCA_SN9C20X is not set +# CONFIG_USB_GSPCA_SONIXB is not set +# CONFIG_USB_GSPCA_SONIXJ is not set +# CONFIG_USB_GSPCA_SPCA500 is not set +# CONFIG_USB_GSPCA_SPCA501 is not set +# CONFIG_USB_GSPCA_SPCA505 is not set +# CONFIG_USB_GSPCA_SPCA506 is not set +# CONFIG_USB_GSPCA_SPCA508 is not set +# CONFIG_USB_GSPCA_SPCA561 is not set +# CONFIG_USB_GSPCA_SPCA1528 is not set +# CONFIG_USB_GSPCA_SQ905 is not set +# CONFIG_USB_GSPCA_SQ905C is not set +# CONFIG_USB_GSPCA_SQ930X is not set +# CONFIG_USB_GSPCA_STK014 is not set +# CONFIG_USB_GSPCA_STK1135 is not set +# CONFIG_USB_GSPCA_STV0680 is not set +# CONFIG_USB_GSPCA_SUNPLUS is not set +# CONFIG_USB_GSPCA_T613 is not set +# CONFIG_USB_GSPCA_TOPRO is not set +# CONFIG_USB_GSPCA_TOUPTEK is not set +# CONFIG_USB_GSPCA_TV8532 is not set +# CONFIG_USB_GSPCA_VC032X is not set +# 
CONFIG_USB_GSPCA_VICAM is not set +# CONFIG_USB_GSPCA_XIRLINK_CIT is not set +# CONFIG_USB_GSPCA_ZC3XX is not set # CONFIG_USB_PWC is not set # CONFIG_VIDEO_CPIA2 is not set # CONFIG_USB_ZR364XX is not set @@ -2801,9 +2846,9 @@ CONFIG_V4L_PLATFORM_DRIVERS=y # QTI MSM Camera And Video & AIS # CONFIG_MSM_CAMERA=y -# CONFIG_MSM_CAMERA_DEBUG is not set +CONFIG_MSM_CAMERA_DEBUG=y CONFIG_MSMB_CAMERA=y -# CONFIG_MSMB_CAMERA_DEBUG is not set +CONFIG_MSMB_CAMERA_DEBUG=y CONFIG_MSM_CAMERA_SENSOR=y CONFIG_MSM_CPP=y CONFIG_MSM_CCI=y @@ -2837,10 +2882,15 @@ CONFIG_MSM_VIDC_V4L2=y CONFIG_MSM_VIDC_VMEM=y CONFIG_MSM_VIDC_GOVERNORS=y CONFIG_MSM_SDE_ROTATOR=y -# CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG is not set +CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y # CONFIG_MSM_AIS is not set -# CONFIG_DVB_MPQ is not set -# CONFIG_TSPP is not set +CONFIG_DVB_MPQ=m +CONFIG_DVB_MPQ_DEMUX=m +CONFIG_DVB_MPQ_NUM_DMX_DEVICES=4 +CONFIG_DVB_MPQ_TSPP1=y +# CONFIG_DVB_MPQ_SW is not set +# CONFIG_DVB_MPQ_MEDIA_BOX_DEMUX is not set +CONFIG_TSPP=m # # Supported MMC/SDIO adapters @@ -3662,7 +3712,8 @@ CONFIG_MMC_PERF_PROFILING=y # CONFIG_MMC_EMBEDDED_SDIO is not set # CONFIG_MMC_PARANOID_SD_INIT is not set CONFIG_MMC_CLKGATE=y -# CONFIG_MMC_CMD_DEBUG is not set +CONFIG_MMC_CMD_DEBUG=y +CONFIG_MMC_CMD_QUEUE_SIZE=256 # # MMC/SD/SDIO Card Drivers @@ -4062,12 +4113,12 @@ CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS=y # CONFIG_FSL_MC_BUS is not set # CONFIG_WILC1000_DRIVER is not set # CONFIG_MOST is not set -CONFIG_SONY_FIPS_KSCL=y +CONFIG_SONY_FIPS_KSCL=m # # Qualcomm Atheros CLD WLAN module # -CONFIG_QCA_CLD_WLAN=y +CONFIG_QCA_CLD_WLAN=m # CONFIG_GOLDFISH is not set # CONFIG_CHROME_PLATFORMS is not set @@ -4090,8 +4141,8 @@ CONFIG_RMNET_IPA3=y CONFIG_MSM_MHI=y CONFIG_MSM_MHI_UCI=y # CONFIG_MSM_MHI_DEBUG is not set -# CONFIG_MSM_11AD is not set -# CONFIG_SEEMP_CORE is not set +CONFIG_MSM_11AD=m +CONFIG_SEEMP_CORE=y CONFIG_USB_BAM=y CONFIG_MSM_EXT_DISPLAY=y # CONFIG_SDIO_QCN is not set @@ -4172,9 +4223,9 @@ 
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y CONFIG_MSM_GLINK_SPI_XPRT=y CONFIG_MSM_SPCOM=y CONFIG_MSM_SPSS_UTILS=y -# CONFIG_MSM_SMEM_LOGGING is not set +CONFIG_MSM_SMEM_LOGGING=y CONFIG_MSM_SMP2P=y -# CONFIG_MSM_SMP2P_TEST is not set +CONFIG_MSM_SMP2P_TEST=y CONFIG_MSM_QMI_INTERFACE=y # CONFIG_MSM_L2_IA_DEBUG is not set CONFIG_MSM_RPM_SMD=y @@ -4225,7 +4276,8 @@ CONFIG_MSM_QDSP6_NOTIFIER=y CONFIG_MSM_ADSP_LOADER=y # CONFIG_MSM_CDSP_LOADER is not set # CONFIG_MSM_LPASS_RESOURCE_MANAGER is not set -# CONFIG_MSM_PERFORMANCE is not set +CONFIG_MSM_PERFORMANCE=y +# CONFIG_MSM_PERFORMANCE_HOTPLUG_ON is not set CONFIG_MSM_SUBSYSTEM_RESTART=y # CONFIG_MSM_SYSMON_COMM is not set CONFIG_MSM_PIL=y @@ -4573,7 +4625,7 @@ CONFIG_ARM_PSCI_FW=y # CONFIG_ISCSI_IBFT is not set CONFIG_QCOM_SCM_64=y CONFIG_HAVE_ARM_SMCCC=y -# CONFIG_MSM_TZ_LOG is not set +CONFIG_MSM_TZ_LOG=y # CONFIG_BIF is not set CONFIG_SENSORS_SSC=y # CONFIG_TEE is not set @@ -4990,7 +5042,7 @@ CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y CONFIG_STRICT_DEVMEM=y # CONFIG_PID_IN_CONTEXTIDR is not set # CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set -# CONFIG_DEBUG_SET_MODULE_RONX is not set +CONFIG_DEBUG_SET_MODULE_RONX=y CONFIG_DEBUG_RODATA=y CONFIG_DEBUG_ALIGN_RODATA=y # CONFIG_FORCE_PAGES is not set @@ -5177,9 +5229,9 @@ CONFIG_CRYPTO_TWOFISH_COMMON=y # CONFIG_CRYPTO_DEFLATE=y # CONFIG_CRYPTO_ZLIB is not set -CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_LZO is not set # CONFIG_CRYPTO_842 is not set -CONFIG_CRYPTO_LZ4=y +# CONFIG_CRYPTO_LZ4 is not set # CONFIG_CRYPTO_LZ4HC is not set # CONFIG_CRYPTO_ZSTD is not set diff --git a/arch/arm64/configs/lineage-msm8998-yoshino-poplar_kddi_defconfig b/arch/arm64/configs/lineage-msm8998-yoshino-poplar_kddi_defconfig deleted file mode 100644 index cb9920433dc3..000000000000 --- a/arch/arm64/configs/lineage-msm8998-yoshino-poplar_kddi_defconfig +++ /dev/null @@ -1,5303 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. 
-# Linux/arm64 4.4.266 Kernel Configuration -# -CONFIG_ARM64=y -CONFIG_64BIT=y -CONFIG_ARCH_PHYS_ADDR_T_64BIT=y -CONFIG_MMU=y -CONFIG_ARCH_MMAP_RND_BITS_MIN=18 -CONFIG_ARCH_MMAP_RND_BITS_MAX=24 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_CSUM=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ZONE_DMA=y -CONFIG_HAVE_GENERIC_RCU_GUP=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_SMP=y -CONFIG_HOTPLUG_SIZE_BITS=30 -CONFIG_ARM64_DMA_USE_IOMMU=y -CONFIG_ARM64_DMA_IOMMU_ALIGNMENT=9 -CONFIG_SWIOTLB=y -CONFIG_IOMMU_HELPER=y -CONFIG_KERNEL_MODE_NEON=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_PGTABLE_LEVELS=3 -# CONFIG_MSM_GVM is not set -# CONFIG_MSM_GVM_QUIN is not set -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_EXTABLE_SORT=y -CONFIG_THREAD_INFO_IN_TASK=y - -# -# General setup -# -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_CROSS_COMPILE="" -# CONFIG_COMPILE_TEST is not set -CONFIG_LOCALVERSION="-whatawurst" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_DEFAULT_HOSTNAME="poplar_kddi" -CONFIG_SWAP=y -# CONFIG_SYSVIPC is not set -# CONFIG_POSIX_MQUEUE is not set -CONFIG_CROSS_MEMORY_ATTACH=y -# CONFIG_FHANDLE is not set -# CONFIG_USELIB is not set -CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -# CONFIG_AUDITSYSCALL is not set - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_SHOW_LEVEL=y -CONFIG_GENERIC_IRQ_MIGRATION=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_GENERIC_MSI_IRQ_DOMAIN=y -CONFIG_HANDLE_DOMAIN_IRQ=y -# CONFIG_IRQ_DOMAIN_DEBUG is not set 
-CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_ARCH_HAS_TICK_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y - -# -# Timers subsystem -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set -CONFIG_NO_HZ_IDLE=y -# CONFIG_NO_HZ_FULL is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y - -# -# CPU/Task time and stats accounting -# -# CONFIG_TICK_CPU_ACCOUNTING is not set -# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set -CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_SCHED_WALT=y -# CONFIG_BSD_PROCESS_ACCT is not set -CONFIG_TASKSTATS=y -# CONFIG_TASK_DELAY_ACCT is not set -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y - -# -# RCU Subsystem -# -CONFIG_PREEMPT_RCU=y -CONFIG_RCU_EXPERT=y -CONFIG_SRCU=y -# CONFIG_TASKS_RCU is not set -CONFIG_RCU_STALL_COMMON=y -CONFIG_RCU_FANOUT=64 -CONFIG_RCU_FANOUT_LEAF=16 -CONFIG_RCU_FAST_NO_HZ=y -# CONFIG_TREE_RCU_TRACE is not set -CONFIG_RCU_BOOST=y -CONFIG_RCU_KTHREAD_PRIO=1 -CONFIG_RCU_BOOST_DELAY=500 -CONFIG_RCU_NOCB_CPU=y -# CONFIG_RCU_NOCB_CPU_NONE is not set -# CONFIG_RCU_NOCB_CPU_ZERO is not set -CONFIG_RCU_NOCB_CPU_ALL=y -# CONFIG_RCU_EXPEDITE_BOOT is not set -CONFIG_BUILD_BIN2C=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=18 -# CONFIG_CONSOLE_FLUSH_ON_HOTPLUG is not set -CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 -CONFIG_GENERIC_SCHED_CLOCK=y -CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_CGROUP_FREEZER=y -# CONFIG_CGROUP_PIDS is not set -# CONFIG_CGROUP_DEVICE is not set -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_SCHEDTUNE=y -CONFIG_PAGE_COUNTER=y -CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_SWAP_ENABLED=y -# CONFIG_MEMCG_KMEM is not set -# CONFIG_CGROUP_PERF is not set -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_RT_GROUP_SCHED=y -# CONFIG_BLK_CGROUP is not set -# CONFIG_CHECKPOINT_RESTORE is not set 
-CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_USER_NS is not set -# CONFIG_PID_NS is not set -CONFIG_NET_NS=y -# CONFIG_SCHED_AUTOGROUP is not set -CONFIG_SCHED_TUNE=y -CONFIG_DEFAULT_USE_ENERGY_AWARE=y -# CONFIG_SYSFS_DEPRECATED is not set -# CONFIG_RELAY is not set -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -# CONFIG_RD_XZ is not set -# CONFIG_RD_LZO is not set -# CONFIG_RD_LZ4 is not set -CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y -CONFIG_HAVE_UID16=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -CONFIG_BPF=y -CONFIG_EXPERT=y -CONFIG_UID16=y -CONFIG_MULTIUSER=y -# CONFIG_SGETMASK_SYSCALL is not set -CONFIG_SYSFS_SYSCALL=y -# CONFIG_SYSCTL_SYSCALL is not set -CONFIG_KALLSYMS=y -CONFIG_KALLSYMS_ALL=y -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -# CONFIG_BPF_SYSCALL is not set -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_AIO_SSD_ONLY=y -CONFIG_ADVISE_SYSCALLS=y -# CONFIG_USERFAULTFD is not set -CONFIG_PCI_QUIRKS=y -# CONFIG_MEMBARRIER is not set -CONFIG_EMBEDDED=y -CONFIG_HAVE_PERF_EVENTS=y -CONFIG_PERF_USE_VMALLOC=y - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -CONFIG_VM_EVENT_COUNTERS=y -# CONFIG_SLUB_DEBUG is not set -# CONFIG_COMPAT_BRK is not set -# CONFIG_SLAB is not set -CONFIG_SLUB=y -# CONFIG_SLOB is not set -CONFIG_SLUB_CPU_PARTIAL=y -CONFIG_SYSTEM_DATA_VERIFICATION=y -CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y -# CONFIG_KPROBES is not set -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set -# CONFIG_UPROBES is not set -# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_ATTRS=y 
-CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_GENERIC_IDLE_POLL_SETUP=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_DMA_API_DEBUG=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_RCU_TABLE_FREE=y -CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP_FILTER=y -CONFIG_HAVE_CC_STACKPROTECTOR=y -# CONFIG_CC_STACKPROTECTOR is not set -CONFIG_CC_STACKPROTECTOR_NONE=y -# CONFIG_CC_STACKPROTECTOR_REGULAR is not set -# CONFIG_CC_STACKPROTECTOR_STRONG is not set -CONFIG_HAVE_CONTEXT_TRACKING=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_HAVE_ARCH_HUGE_VMAP=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_ARCH_MMAP_RND_BITS=18 -CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 -CONFIG_CLONE_BACKWARDS=y -CONFIG_OLD_SIGSUSPEND3=y -CONFIG_COMPAT_OLD_SIGACTION=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -CONFIG_HAVE_GENERIC_DMA_COHERENT=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -# CONFIG_MODULE_FORCE_LOAD is not set -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -# CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_MODULE_SIG=y -# CONFIG_MODULE_SIG_FORCE is not set -CONFIG_MODULE_SIG_ALL=y -# CONFIG_MODULE_SIG_SHA1 is not set -# CONFIG_MODULE_SIG_SHA224 is not set -# CONFIG_MODULE_SIG_SHA256 is not set -# CONFIG_MODULE_SIG_SHA384 is not set -CONFIG_MODULE_SIG_SHA512=y -CONFIG_MODULE_SIG_HASH="sha512" -# CONFIG_MODULE_COMPRESS is not set -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLK_DEV_BSG=y 
-# CONFIG_BLK_DEV_BSGLIB is not set -# CONFIG_BLK_DEV_INTEGRITY is not set -# CONFIG_BLK_CMDLINE_PARSER is not set -# CONFIG_BLOCK_PERF_FRAMEWORK is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -# CONFIG_AIX_PARTITION is not set -# CONFIG_OSF_PARTITION is not set -# CONFIG_AMIGA_PARTITION is not set -# CONFIG_ATARI_PARTITION is not set -# CONFIG_MAC_PARTITION is not set -CONFIG_MSDOS_PARTITION=y -# CONFIG_BSD_DISKLABEL is not set -# CONFIG_MINIX_SUBPARTITION is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set -# CONFIG_LDM_PARTITION is not set -# CONFIG_SGI_PARTITION is not set -# CONFIG_ULTRIX_PARTITION is not set -# CONFIG_SUN_PARTITION is not set -# CONFIG_KARMA_PARTITION is not set -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -# CONFIG_CMDLINE_PARTITION is not set -CONFIG_BLOCK_COMPAT=y - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -# CONFIG_IOSCHED_TEST is not set -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -# CONFIG_DEFAULT_DEADLINE is not set -CONFIG_DEFAULT_CFQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_ASN1=y -CONFIG_UNINLINE_SPIN_UNLOCK=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_FREEZER=y - -# -# Platform selection -# -# CONFIG_ARCH_BCM_IPROC is not set -# CONFIG_ARCH_BERLIN is not set -# CONFIG_ARCH_EXYNOS7 is not set -# CONFIG_ARCH_LAYERSCAPE is not set -# CONFIG_ARCH_HISI is not set -# CONFIG_ARCH_MEDIATEK is not set -CONFIG_ARCH_QCOM=y -# CONFIG_ARCH_MSM8996 is not set -CONFIG_ARCH_MSM8998=y -CONFIG_ARCH_SONY_YOSHINO=y -# CONFIG_MACH_SONY_MAPLE is not set -# CONFIG_MACH_SONY_MAPLE_DSDS is not set -# CONFIG_MACH_SONY_POPLAR is not set -# CONFIG_MACH_SONY_POPLAR_DSDS is not set -CONFIG_MACH_SONY_POPLAR_KDDI=y -# CONFIG_MACH_SONY_LILAC is not set -CONFIG_ARCH_MSMHAMSTER=y -# CONFIG_ARCH_SDM660 is not set -# 
CONFIG_ARCH_SDM630 is not set -# CONFIG_ARCH_ROCKCHIP is not set -# CONFIG_ARCH_SEATTLE is not set -# CONFIG_ARCH_STRATIX10 is not set -# CONFIG_ARCH_TEGRA is not set -# CONFIG_ARCH_SPRD is not set -# CONFIG_ARCH_THUNDER is not set -# CONFIG_ARCH_VEXPRESS is not set -# CONFIG_ARCH_XGENE is not set -# CONFIG_ARCH_ZYNQMP is not set - -# -# Bus support -# -CONFIG_PCI=y -CONFIG_PCI_DOMAINS=y -CONFIG_PCI_DOMAINS_GENERIC=y -CONFIG_PCI_SYSCALL=y -CONFIG_PCI_BUS_ADDR_T_64BIT=y -CONFIG_PCI_MSI=y -CONFIG_PCI_MSI_IRQ_DOMAIN=y -# CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set -# CONFIG_PCI_STUB is not set -# CONFIG_PCI_IOV is not set -# CONFIG_PCI_PRI is not set -# CONFIG_PCI_PASID is not set -CONFIG_PCI_MSM=y -CONFIG_PCI_LABEL=y - -# -# PCI host controller drivers -# -# CONFIG_PCI_HOST_GENERIC is not set -# CONFIG_PCIE_IPROC is not set -# CONFIG_PCI_HISI is not set -# CONFIG_PCIEPORTBUS is not set -# CONFIG_HOTPLUG_PCI is not set - -# -# Kernel Features -# - -# -# ARM errata workarounds via the alternatives framework -# -CONFIG_ARM64_ERRATUM_826319=y -CONFIG_ARM64_ERRATUM_827319=y -CONFIG_ARM64_ERRATUM_824069=y -CONFIG_ARM64_ERRATUM_819472=y -CONFIG_ARM64_ERRATUM_832075=y -CONFIG_ARM64_ERRATUM_845719=y -CONFIG_ARM64_ERRATUM_843419=y -CONFIG_ARM64_ERRATUM_1024718=y -CONFIG_CAVIUM_ERRATUM_22375=y -CONFIG_CAVIUM_ERRATUM_23154=y -CONFIG_CAVIUM_ERRATUM_27456=y -CONFIG_ARM64_4K_PAGES=y -# CONFIG_ARM64_16K_PAGES is not set -# CONFIG_ARM64_DCACHE_DISABLE is not set -# CONFIG_ARM64_ICACHE_DISABLE is not set -# CONFIG_ARM64_64K_PAGES is not set -CONFIG_ARM64_VA_BITS_39=y -# CONFIG_ARM64_VA_BITS_48 is not set -CONFIG_ARM64_VA_BITS=39 -# CONFIG_CPU_BIG_ENDIAN is not set -CONFIG_SCHED_MC=y -# CONFIG_SCHED_SMT is not set -CONFIG_NR_CPUS=8 -CONFIG_HOTPLUG_CPU=y -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y -CONFIG_ARCH_NR_GPIO=1024 -CONFIG_QCOM_TLB_EL2_HANDLER=y -# CONFIG_PREEMPT_NONE is not set -# CONFIG_PREEMPT_VOLUNTARY is not set 
-CONFIG_PREEMPT=y -CONFIG_PREEMPT_COUNT=y -# CONFIG_HZ_100 is not set -# CONFIG_HZ_250 is not set -CONFIG_HZ_300=y -# CONFIG_HZ_1000 is not set -CONFIG_HZ=300 -CONFIG_SCHED_HRTICK=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_HAVE_ARCH_PFN_VALID=y -CONFIG_HW_PERF_EVENTS=y -CONFIG_ARM64_REG_REBALANCE_ON_CTX_SW=y -# CONFIG_PERF_EVENTS_USERMODE is not set -# CONFIG_PERF_EVENTS_RESET_PMU_DEBUGFS is not set -CONFIG_SYS_SUPPORTS_HUGETLBFS=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_SPARSEMEM_MANUAL=y -CONFIG_SPARSEMEM=y -CONFIG_HAVE_MEMORY_PRESENT=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_MEMBLOCK=y -CONFIG_NO_BOOTMEM=y -CONFIG_MEMORY_ISOLATION=y -# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set -# CONFIG_MEMORY_HOTPLUG is not set -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_COMPACTION=y -CONFIG_MIGRATION=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_ZONE_DMA_FLAG=1 -CONFIG_BOUNCE=y -CONFIG_OOM_SCORE_NOTIFIER=y -# CONFIG_KSM is not set -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -# CONFIG_TRANSPARENT_HUGEPAGE is not set -# CONFIG_CLEANCACHE is not set -# CONFIG_FRONTSWAP is not set -CONFIG_CMA=y -# CONFIG_CMA_DEBUG is not set -CONFIG_CMA_DEBUGFS=y -CONFIG_CMA_AREAS=7 -CONFIG_ZPOOL=y -# CONFIG_ZBUD is not set -CONFIG_ZSMALLOC=y -# CONFIG_PGTABLE_MAPPING is not set -# CONFIG_ZSMALLOC_STAT is not set -CONFIG_VMAP_LAZY_PURGING_FACTOR=32 -CONFIG_GENERIC_EARLY_IOREMAP=y -CONFIG_BALANCE_ANON_FILE_RECLAIM=y -CONFIG_KSWAPD_CPU_AFFINITY_MASK="" -# CONFIG_IDLE_PAGE_TRACKING is not set -# CONFIG_FORCE_ALLOC_FROM_DMA_ZONE is not set -# CONFIG_PROCESS_RECLAIM is not set -CONFIG_SECCOMP=y -# CONFIG_XEN is not set -CONFIG_FORCE_MAX_ZONEORDER=11 -# CONFIG_UNMAP_KERNEL_AT_EL0 is not set -CONFIG_HARDEN_BRANCH_PREDICTOR=y -CONFIG_ARMV8_DEPRECATED=y 
-CONFIG_SWP_EMULATION=y -CONFIG_CP15_BARRIER_EMULATION=y -CONFIG_SETEND_EMULATION=y -CONFIG_ARM64_SW_TTBR0_PAN=y - -# -# ARMv8.1 architectural features -# -# CONFIG_ARM64_HW_AFDBM is not set -# CONFIG_ARM64_PAN is not set -# CONFIG_ARM64_LSE_ATOMICS is not set -CONFIG_ARM64_FLUSH_CONSOLE_ON_RESTART=y -# CONFIG_ARM64_UAO is not set -CONFIG_ARM64_MODULE_CMODEL_LARGE=y -CONFIG_ARM64_MODULE_PLTS=y -CONFIG_RELOCATABLE=y -CONFIG_RANDOMIZE_BASE=y -# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set - -# -# Boot options -# -# CONFIG_ARM64_ACPI_PARKING_PROTOCOL is not set -CONFIG_CMDLINE="" -# CONFIG_EFI is not set -CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y -CONFIG_IMG_GZ_DTB=y -# CONFIG_IMG_DTB is not set -CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME="Image.gz-dtb" -CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES="" -CONFIG_ARCH_HAS_CRASH_NOTES=y -CONFIG_CRASH_NOTES=y -# CONFIG_BUILD_ARM64_DT_OVERLAY is not set - -# -# Userspace binary formats -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ELFCORE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_BINFMT_SCRIPT=y -# CONFIG_HAVE_AOUT is not set -# CONFIG_BINFMT_MISC is not set -CONFIG_COREDUMP=y -CONFIG_COMPAT=y -CONFIG_KUSER_HELPERS=y -CONFIG_KEYS_COMPAT=y -CONFIG_COMPAT_VDSO=y -CONFIG_CROSS_COMPILE_ARM32="" - -# -# Power management options -# -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -# CONFIG_SUSPEND_SKIP_SYNC is not set -CONFIG_WAKELOCK=y -# CONFIG_HIBERNATION is not set -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -CONFIG_PM_AUTOSLEEP=y -CONFIG_PM_WAKELOCKS=y -CONFIG_PM_WAKELOCKS_LIMIT=0 -# CONFIG_PM_WAKELOCKS_GC is not set -CONFIG_PM=y -# CONFIG_PM_DEBUG is not set -CONFIG_PM_WAKEUP_TIMES=y -CONFIG_PM_OPP=y -CONFIG_PM_CLK=y -CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y -CONFIG_CPU_PM=y -# CONFIG_WAKEUP_IRQ_DEBUG is not set -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y - -# -# CPU Power Management -# - -# -# CPU Idle -# -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y 
-CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_CPU_IDLE_GOV_MENU=y - -# -# ARM CPU Idle Drivers -# -# CONFIG_ARM_CPUIDLE is not set -# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_STAT=y -# CONFIG_CPU_FREQ_STAT_DETAILS is not set -CONFIG_CPU_FREQ_TIMES=y -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -# CONFIG_CPU_FREQ_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -# CONFIG_CPU_BOOST is not set -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y - -# -# CPU frequency scaling drivers -# -# CONFIG_CPUFREQ_DT is not set -# CONFIG_ARM_BIG_LITTLE_CPUFREQ is not set -# CONFIG_ARM_KIRKWOOD_CPUFREQ is not set -# CONFIG_ACPI_CPPC_CPUFREQ is not set -CONFIG_CPU_FREQ_MSM=y -CONFIG_NET=y -CONFIG_COMPAT_NETLINK_MESSAGES=y -CONFIG_NET_INGRESS=y -# CONFIG_DISABLE_NET_SKB_FRAG_CACHE is not set - -# -# Networking options -# -CONFIG_PACKET=y -# CONFIG_PACKET_DIAG is not set -CONFIG_UNIX=y -# CONFIG_UNIX_DIAG is not set -CONFIG_XFRM=y -CONFIG_XFRM_ALGO=y -CONFIG_XFRM_USER=y -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -CONFIG_XFRM_STATISTICS=y -CONFIG_XFRM_IPCOMP=y -CONFIG_NET_KEY=y -# CONFIG_NET_KEY_MIGRATE is not set -CONFIG_XFRM_RFC_4868_TRUNCATION=y -CONFIG_INET=y -CONFIG_WIREGUARD=y -# CONFIG_WIREGUARD_DEBUG is not set -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -# CONFIG_IP_FIB_TRIE_STATS is not set -CONFIG_IP_MULTIPLE_TABLES=y -# CONFIG_IP_ROUTE_MULTIPATH is not set -CONFIG_IP_ROUTE_VERBOSE=y 
-CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -# CONFIG_IP_PNP_BOOTP is not set -# CONFIG_IP_PNP_RARP is not set -# CONFIG_NET_IPIP is not set -# CONFIG_NET_IPGRE_DEMUX is not set -CONFIG_NET_IP_TUNNEL=y -# CONFIG_IP_MROUTE is not set -# CONFIG_SYN_COOKIES is not set -CONFIG_NET_IPVTI=y -CONFIG_NET_UDP_TUNNEL=y -# CONFIG_NET_FOU is not set -# CONFIG_NET_FOU_IP_TUNNELS is not set -CONFIG_INET_AH=y -CONFIG_INET_ESP=y -CONFIG_INET_IPCOMP=y -CONFIG_INET_XFRM_TUNNEL=y -CONFIG_INET_TUNNEL=y -CONFIG_INET_XFRM_MODE_TRANSPORT=y -CONFIG_INET_XFRM_MODE_TUNNEL=y -CONFIG_INET_XFRM_MODE_BEET=y -# CONFIG_INET_LRO is not set -CONFIG_INET_DIAG=y -CONFIG_INET_TCP_DIAG=y -CONFIG_INET_UDP_DIAG=y -CONFIG_INET_DIAG_DESTROY=y -# CONFIG_TCP_CONG_ADVANCED is not set -CONFIG_TCP_CONG_CUBIC=y -CONFIG_DEFAULT_TCP_CONG="cubic" -# CONFIG_TCP_MD5SIG is not set -CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -# CONFIG_IPV6_ILA is not set -CONFIG_INET6_XFRM_TUNNEL=y -CONFIG_INET6_TUNNEL=y -CONFIG_INET6_XFRM_MODE_TRANSPORT=y -CONFIG_INET6_XFRM_MODE_TUNNEL=y -CONFIG_INET6_XFRM_MODE_BEET=y -# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set -CONFIG_IPV6_VTI=y -CONFIG_IPV6_SIT=y -# CONFIG_IPV6_SIT_6RD is not set -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=y -# CONFIG_IPV6_GRE is not set -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -# CONFIG_IPV6_MROUTE is not set -CONFIG_NETLABEL=y -CONFIG_ANDROID_PARANOID_NETWORK=y -CONFIG_NET_ACTIVITY_STATS=y -CONFIG_NETWORK_SECMARK=y -# CONFIG_NET_PTP_CLASSIFY is not set -# CONFIG_NETWORK_PHY_TIMESTAMPING is not set -CONFIG_NETFILTER=y -# CONFIG_NETFILTER_DEBUG is not set -CONFIG_NETFILTER_ADVANCED=y -# CONFIG_BRIDGE_NETFILTER is not set - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_INGRESS=y -CONFIG_NETFILTER_NETLINK=y -# CONFIG_NETFILTER_NETLINK_ACCT is not set -CONFIG_NETFILTER_NETLINK_QUEUE=y 
-CONFIG_NETFILTER_NETLINK_LOG=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_LOG_COMMON=y -CONFIG_NF_CONNTRACK_MARK=y -CONFIG_NF_CONNTRACK_SECMARK=y -# CONFIG_NF_CONNTRACK_ZONES is not set -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -# CONFIG_NF_CONNTRACK_TIMEOUT is not set -# CONFIG_NF_CONNTRACK_TIMESTAMP is not set -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=y -CONFIG_NF_CT_PROTO_SCTP=y -CONFIG_NF_CT_PROTO_UDPLITE=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_BROADCAST=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -# CONFIG_NF_CONNTRACK_SNMP is not set -CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SANE=y -# CONFIG_NF_CONNTRACK_SIP is not set -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -# CONFIG_NF_CT_NETLINK_TIMEOUT is not set -# CONFIG_NETFILTER_NETLINK_GLUE_CT is not set -CONFIG_NF_NAT=y -CONFIG_NF_NAT_NEEDED=y -CONFIG_NF_NAT_PROTO_DCCP=y -CONFIG_NF_NAT_PROTO_UDPLITE=y -CONFIG_NF_NAT_PROTO_SCTP=y -CONFIG_NF_NAT_AMANDA=y -CONFIG_NF_NAT_FTP=y -CONFIG_NF_NAT_IRC=y -# CONFIG_NF_NAT_SIP is not set -CONFIG_NF_NAT_TFTP=y -CONFIG_NF_NAT_REDIRECT=y -# CONFIG_NF_TABLES is not set -CONFIG_NETFILTER_XTABLES=y - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=y -CONFIG_NETFILTER_XT_CONNMARK=y - -# -# Xtables targets -# -# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set -# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y -CONFIG_NETFILTER_XT_TARGET_CONNMARK=y -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y -CONFIG_NETFILTER_XT_TARGET_CT=y -# CONFIG_NETFILTER_XT_TARGET_DSCP is not set -# CONFIG_NETFILTER_XT_TARGET_HL is not set -# CONFIG_NETFILTER_XT_TARGET_HMARK is not set -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y -# CONFIG_NETFILTER_XT_TARGET_LED is not set -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_NAT=y -CONFIG_NETFILTER_XT_TARGET_NETMAP=y 
-CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set -CONFIG_NETFILTER_XT_TARGET_REDIRECT=y -CONFIG_NETFILTER_XT_TARGET_TEE=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y -CONFIG_NETFILTER_XT_TARGET_SECMARK=y -CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set - -# -# Xtables matches -# -# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set -# CONFIG_NETFILTER_XT_MATCH_BPF is not set -# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set -# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set -# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -# CONFIG_NETFILTER_XT_MATCH_CPU is not set -# CONFIG_NETFILTER_XT_MATCH_DCCP is not set -# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ECN=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y -CONFIG_NETFILTER_XT_MATCH_HELPER=y -CONFIG_NETFILTER_XT_MATCH_HL=y -# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -# CONFIG_NETFILTER_XT_MATCH_L2TP is not set -CONFIG_NETFILTER_XT_MATCH_LENGTH=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MAC=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set -# CONFIG_NETFILTER_XT_MATCH_OSF is not set -# CONFIG_NETFILTER_XT_MATCH_OWNER is not set -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_QTAGUID=y -CONFIG_NETFILTER_XT_MATCH_QUOTA=y -CONFIG_NETFILTER_XT_MATCH_QUOTA2=y -# CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG is not set -# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set -# 
CONFIG_NETFILTER_XT_MATCH_REALM is not set -# CONFIG_NETFILTER_XT_MATCH_RECENT is not set -# CONFIG_NETFILTER_XT_MATCH_SCTP is not set -CONFIG_NETFILTER_XT_MATCH_SOCKET=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NETFILTER_XT_MATCH_STATISTIC=y -CONFIG_NETFILTER_XT_MATCH_STRING=y -# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set -CONFIG_NETFILTER_XT_MATCH_TIME=y -CONFIG_NETFILTER_XT_MATCH_U32=y -# CONFIG_IP_SET is not set -# CONFIG_IP_VS is not set - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=y -CONFIG_NF_CONNTRACK_IPV4=y -CONFIG_NF_CONNTRACK_PROC_COMPAT=y -CONFIG_NF_DUP_IPV4=y -# CONFIG_NF_LOG_ARP is not set -CONFIG_NF_LOG_IPV4=y -CONFIG_NF_REJECT_IPV4=y -CONFIG_NF_NAT_IPV4=y -CONFIG_NF_NAT_MASQUERADE_IPV4=y -CONFIG_NF_NAT_PROTO_GRE=y -CONFIG_NF_NAT_PPTP=y -CONFIG_NF_NAT_H323=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -CONFIG_IP_NF_MATCH_RPFILTER=y -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -# CONFIG_IP_NF_TARGET_SYNPROXY is not set -CONFIG_IP_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -CONFIG_IP_NF_TARGET_NETMAP=y -CONFIG_IP_NF_TARGET_REDIRECT=y -CONFIG_IP_NF_MANGLE=y -# CONFIG_IP_NF_TARGET_CLUSTERIP is not set -# CONFIG_IP_NF_TARGET_ECN is not set -# CONFIG_IP_NF_TARGET_TTL is not set -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_SECURITY=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV6=y -CONFIG_NF_CONNTRACK_IPV6=y -CONFIG_NF_DUP_IPV6=y -CONFIG_NF_REJECT_IPV6=y -CONFIG_NF_LOG_IPV6=y -# CONFIG_NF_NAT_IPV6 is not set -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_IPTABLES_128=y -# CONFIG_IP6_NF_MATCH_AH is not set -# CONFIG_IP6_NF_MATCH_EUI64 is not set -# CONFIG_IP6_NF_MATCH_FRAG is not set -# CONFIG_IP6_NF_MATCH_OPTS is not set -# CONFIG_IP6_NF_MATCH_HL is not set -# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set -# CONFIG_IP6_NF_MATCH_MH is not set -CONFIG_IP6_NF_MATCH_RPFILTER=y -# CONFIG_IP6_NF_MATCH_RT 
is not set -# CONFIG_IP6_NF_TARGET_HL is not set -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -# CONFIG_IP6_NF_TARGET_SYNPROXY is not set -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -# CONFIG_IP6_NF_SECURITY is not set -# CONFIG_IP6_NF_NAT is not set -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -# CONFIG_BRIDGE_EBT_T_FILTER is not set -# CONFIG_BRIDGE_EBT_T_NAT is not set -# CONFIG_BRIDGE_EBT_802_3 is not set -# CONFIG_BRIDGE_EBT_AMONG is not set -# CONFIG_BRIDGE_EBT_ARP is not set -# CONFIG_BRIDGE_EBT_IP is not set -# CONFIG_BRIDGE_EBT_IP6 is not set -# CONFIG_BRIDGE_EBT_LIMIT is not set -# CONFIG_BRIDGE_EBT_MARK is not set -# CONFIG_BRIDGE_EBT_PKTTYPE is not set -# CONFIG_BRIDGE_EBT_STP is not set -# CONFIG_BRIDGE_EBT_VLAN is not set -# CONFIG_BRIDGE_EBT_ARPREPLY is not set -# CONFIG_BRIDGE_EBT_DNAT is not set -# CONFIG_BRIDGE_EBT_MARK_T is not set -# CONFIG_BRIDGE_EBT_REDIRECT is not set -# CONFIG_BRIDGE_EBT_SNAT is not set -# CONFIG_BRIDGE_EBT_LOG is not set -# CONFIG_BRIDGE_EBT_NFLOG is not set -# CONFIG_IP_DCCP is not set -# CONFIG_IP_SCTP is not set -# CONFIG_RDS is not set -# CONFIG_TIPC is not set -# CONFIG_ATM is not set -CONFIG_L2TP=y -# CONFIG_L2TP_DEBUGFS is not set -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=y -CONFIG_L2TP_ETH=y -CONFIG_STP=y -CONFIG_BRIDGE=y -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_HAVE_NET_DSA=y -# CONFIG_NET_DSA is not set -# CONFIG_VLAN_8021Q is not set -# CONFIG_DECNET is not set -CONFIG_LLC=y -# CONFIG_LLC2 is not set -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_PHONET is not set -# CONFIG_6LOWPAN is not set -# CONFIG_IEEE802154 is not set -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -# CONFIG_NET_SCH_CBQ is not set -CONFIG_NET_SCH_HTB=y -# CONFIG_NET_SCH_HFSC is not set -CONFIG_NET_SCH_PRIO=y -CONFIG_NET_SCH_MULTIQ=y -# CONFIG_NET_SCH_RED is not set -# CONFIG_NET_SCH_SFB is not set -# CONFIG_NET_SCH_SFQ is not set -# CONFIG_NET_SCH_TEQL is not 
set -# CONFIG_NET_SCH_TBF is not set -# CONFIG_NET_SCH_GRED is not set -# CONFIG_NET_SCH_DSMARK is not set -# CONFIG_NET_SCH_NETEM is not set -# CONFIG_NET_SCH_DRR is not set -# CONFIG_NET_SCH_MQPRIO is not set -# CONFIG_NET_SCH_CHOKE is not set -# CONFIG_NET_SCH_QFQ is not set -# CONFIG_NET_SCH_CODEL is not set -# CONFIG_NET_SCH_FQ_CODEL is not set -# CONFIG_NET_SCH_FQ is not set -# CONFIG_NET_SCH_HHF is not set -# CONFIG_NET_SCH_PIE is not set -CONFIG_NET_SCH_INGRESS=y -# CONFIG_NET_SCH_PLUG is not set - -# -# Classification -# -CONFIG_NET_CLS=y -# CONFIG_NET_CLS_BASIC is not set -# CONFIG_NET_CLS_TCINDEX is not set -# CONFIG_NET_CLS_ROUTE4 is not set -CONFIG_NET_CLS_FW=y -CONFIG_NET_CLS_U32=y -# CONFIG_CLS_U32_PERF is not set -CONFIG_CLS_U32_MARK=y -# CONFIG_NET_CLS_RSVP is not set -# CONFIG_NET_CLS_RSVP6 is not set -CONFIG_NET_CLS_FLOW=y -# CONFIG_NET_CLS_CGROUP is not set -# CONFIG_NET_CLS_BPF is not set -# CONFIG_NET_CLS_FLOWER is not set -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 -CONFIG_NET_EMATCH_CMP=y -CONFIG_NET_EMATCH_NBYTE=y -CONFIG_NET_EMATCH_U32=y -CONFIG_NET_EMATCH_META=y -CONFIG_NET_EMATCH_TEXT=y -CONFIG_NET_CLS_ACT=y -# CONFIG_NET_ACT_POLICE is not set -CONFIG_NET_ACT_GACT=y -# CONFIG_GACT_PROB is not set -CONFIG_NET_ACT_MIRRED=y -# CONFIG_NET_ACT_IPT is not set -# CONFIG_NET_ACT_NAT is not set -# CONFIG_NET_ACT_PEDIT is not set -# CONFIG_NET_ACT_SIMP is not set -CONFIG_NET_ACT_SKBEDIT=y -# CONFIG_NET_ACT_CSUM is not set -# CONFIG_NET_ACT_VLAN is not set -# CONFIG_NET_ACT_BPF is not set -# CONFIG_NET_ACT_CONNMARK is not set -# CONFIG_NET_CLS_IND is not set -CONFIG_NET_SCH_FIFO=y -# CONFIG_DCB is not set -# CONFIG_DNS_RESOLVER is not set -# CONFIG_BATMAN_ADV is not set -# CONFIG_OPENVSWITCH is not set -# CONFIG_VSOCKETS is not set -# CONFIG_NETLINK_DIAG is not set -# CONFIG_MPLS is not set -# CONFIG_HSR is not set -# CONFIG_NET_SWITCHDEV is not set -# CONFIG_NET_L3_MASTER_DEV is not set -CONFIG_RMNET_DATA=y -CONFIG_RMNET_DATA_FC=y 
-CONFIG_RMNET_DATA_DEBUG_PKT=y -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_XPS=y -# CONFIG_CGROUP_NET_PRIO is not set -# CONFIG_CGROUP_NET_CLASSID is not set -CONFIG_NET_RX_BUSY_POLL=y -CONFIG_BQL=y -# CONFIG_BPF_JIT is not set -CONFIG_NET_FLOW_LIMIT=y -CONFIG_SOCKEV_NLMCAST=y - -# -# Network testing -# -# CONFIG_NET_PKTGEN is not set -# CONFIG_NET_DROP_MONITOR is not set -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -# CONFIG_IRDA is not set -CONFIG_BT=y -CONFIG_BT_BREDR=y -# CONFIG_BT_RFCOMM is not set -# CONFIG_BT_BNEP is not set -# CONFIG_BT_HIDP is not set -CONFIG_BT_HS=y -CONFIG_BT_LE=y -# CONFIG_BT_SELFTEST is not set -CONFIG_BT_DEBUGFS=y - -# -# Bluetooth device drivers -# -# CONFIG_BT_HCIBTUSB is not set -# CONFIG_BT_HCIBTSDIO is not set -# CONFIG_BT_HCIUART is not set -# CONFIG_BT_HCIBCM203X is not set -# CONFIG_BT_HCIBFUSB is not set -# CONFIG_BT_HCIVHCI is not set -# CONFIG_BT_MRVL is not set -CONFIG_MSM_BT_POWER=y -CONFIG_BTFM_SLIM=y -CONFIG_BTFM_SLIM_WCN3990=y -# CONFIG_AF_RXRPC is not set -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y -CONFIG_WIRELESS_EXT=y -CONFIG_WEXT_CORE=y -CONFIG_WEXT_PROC=y -CONFIG_WEXT_SPY=y -CONFIG_WEXT_PRIV=y -CONFIG_CFG80211=y -CONFIG_NL80211_TESTMODE=y -# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set -# CONFIG_CFG80211_REG_DEBUG is not set -CONFIG_CFG80211_CERTIFICATION_ONUS=y -CONFIG_CFG80211_REG_CELLULAR_HINTS=y -# CONFIG_CFG80211_REG_RELAX_NO_IR is not set -CONFIG_CFG80211_DEFAULT_PS=y -# CONFIG_CFG80211_DEBUGFS is not set -CONFIG_CFG80211_INTERNAL_REGDB=y -CONFIG_CFG80211_CRDA_SUPPORT=y -# CONFIG_CFG80211_WEXT is not set -# CONFIG_LIB80211 is not set -# CONFIG_MAC80211 is not set -CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 -# CONFIG_WIMAX is not set -CONFIG_RFKILL=y -CONFIG_RFKILL_PM=y -CONFIG_RFKILL_LEDS=y -# CONFIG_RFKILL_INPUT is not set -# CONFIG_RFKILL_REGULATOR is not set -# CONFIG_RFKILL_GPIO is not set -# CONFIG_NET_9P is not set -# CONFIG_CAIF is not set -# CONFIG_CEPH_LIB is not set -# CONFIG_NFC is not set -# 
CONFIG_NFC_NQ is not set -# CONFIG_LWTUNNEL is not set -CONFIG_IPC_ROUTER=y -CONFIG_IPC_ROUTER_SECURITY=y -CONFIG_DST_CACHE=y -CONFIG_HAVE_BPF_JIT=y -CONFIG_HAVE_EBPF_JIT=y - -# -# Device Drivers -# -CONFIG_ARM_AMBA=y -# CONFIG_TEGRA_AHB is not set - -# -# Generic Driver Options -# -CONFIG_UEVENT_HELPER=y -CONFIG_UEVENT_HELPER_PATH="" -# CONFIG_DEVTMPFS is not set -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y -CONFIG_FW_LOADER=y -CONFIG_FIRMWARE_IN_KERNEL=y -CONFIG_EXTRA_FIRMWARE="" -CONFIG_FW_LOADER_USER_HELPER=y -CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y -# CONFIG_FW_CACHE is not set -CONFIG_ALLOW_DEV_COREDUMP=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_SYS_HYPERVISOR is not set -# CONFIG_GENERIC_CPU_DEVICES is not set -CONFIG_GENERIC_CPU_AUTOPROBE=y -CONFIG_SOC_BUS=y -CONFIG_REGMAP=y -CONFIG_REGMAP_I2C=y -CONFIG_REGMAP_SPI=y -CONFIG_REGMAP_SPMI=y -CONFIG_REGMAP_MMIO=y -CONFIG_REGMAP_SWR=y -CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y -CONFIG_DMA_SHARED_BUFFER=y -# CONFIG_FENCE_TRACE is not set -CONFIG_DMA_CMA=y - -# -# Default contiguous memory area size: -# -CONFIG_CMA_SIZE_MBYTES=16 -CONFIG_CMA_SIZE_SEL_MBYTES=y -# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set -# CONFIG_CMA_SIZE_SEL_MIN is not set -# CONFIG_CMA_SIZE_SEL_MAX is not set -CONFIG_CMA_ALIGNMENT=8 - -# -# Bus devices -# -# CONFIG_ARM_CCI400_PMU is not set -# CONFIG_ARM_CCI500_PMU is not set -# CONFIG_ARM_CCN is not set -# CONFIG_VEXPRESS_CONFIG is not set -# CONFIG_CONNECTOR is not set -# CONFIG_MTD is not set -CONFIG_DTC=y -CONFIG_OF=y -# CONFIG_OF_UNITTEST is not set -CONFIG_OF_FLATTREE=y -CONFIG_OF_EARLY_FLATTREE=y -CONFIG_OF_ADDRESS=y -CONFIG_OF_ADDRESS_PCI=y -CONFIG_OF_IRQ=y -CONFIG_OF_NET=y -CONFIG_OF_MDIO=y -CONFIG_OF_PCI=y -CONFIG_OF_PCI_IRQ=y -CONFIG_OF_RESERVED_MEM=y -CONFIG_OF_SLIMBUS=y -# CONFIG_OF_OVERLAY is not set -CONFIG_OF_BATTERYDATA=y -# CONFIG_PARPORT is not set -CONFIG_PNP=y -# CONFIG_PNP_DEBUG_MESSAGES is not set - -# -# Protocols -# 
-CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -# CONFIG_BLK_DEV_NULL_BLK is not set -# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set -CONFIG_ZRAM=y -# CONFIG_ZRAM_WRITEBACK is not set -# CONFIG_ZRAM_MEMORY_TRACKING is not set -# CONFIG_BLK_CPQ_CISS_DA is not set -# CONFIG_BLK_DEV_DAC960 is not set -# CONFIG_BLK_DEV_UMEM is not set -# CONFIG_BLK_DEV_COW_COMMON is not set -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 -# CONFIG_BLK_DEV_CRYPTOLOOP is not set -# CONFIG_BLK_DEV_DRBD is not set -# CONFIG_BLK_DEV_NBD is not set -# CONFIG_BLK_DEV_SKD is not set -# CONFIG_BLK_DEV_SX8 is not set -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=8192 -# CONFIG_CDROM_PKTCDVD is not set -# CONFIG_ATA_OVER_ETH is not set -# CONFIG_BLK_DEV_RBD is not set -# CONFIG_BLK_DEV_RSXX is not set -# CONFIG_BLK_DEV_NVME is not set - -# -# Misc devices -# -# CONFIG_SENSORS_LIS3LV02D is not set -# CONFIG_AD525X_DPOT is not set -# CONFIG_DUMMY_IRQ is not set -# CONFIG_PHANTOM is not set -# CONFIG_SGI_IOC4 is not set -# CONFIG_TIFM_CORE is not set -# CONFIG_ICS932S401 is not set -# CONFIG_ENCLOSURE_SERVICES is not set -# CONFIG_HP_ILO is not set -# CONFIG_QCOM_COINCELL is not set -# CONFIG_APDS9802ALS is not set -# CONFIG_ISL29003 is not set -# CONFIG_ISL29020 is not set -# CONFIG_SENSORS_TSL2550 is not set -# CONFIG_SENSORS_BH1780 is not set -# CONFIG_SENSORS_BH1770 is not set -# CONFIG_SENSORS_APDS990X is not set -# CONFIG_HMC6352 is not set -# CONFIG_DS1682 is not set -# CONFIG_TI_DAC7512 is not set -CONFIG_UID_STAT=y -# CONFIG_BMP085_I2C is not set -# CONFIG_BMP085_SPI is not set -# CONFIG_USB_SWITCH_FSA9480 is not set -# CONFIG_LATTICE_ECP3_CONFIG is not set -# CONFIG_SRAM is not set -CONFIG_QSEECOM=y -CONFIG_HDCP_QSEECOM=y -# CONFIG_PROFILER is not set -CONFIG_UID_SYS_STATS=y -CONFIG_QPNP_MISC=y -# CONFIG_UID_SYS_STATS_DEBUG is not set -CONFIG_MEMORY_STATE_TIME=y -CONFIG_RAMDUMP_TAGS=y -CONFIG_RAMDUMP_MEMDESC=y -CONFIG_POWERKEY_FORCECRASH=y -# CONFIG_CXD224X_NFC is 
not set -# CONFIG_BD7602_POWER_IC is not set -# CONFIG_ONESEG_TUNER_SMTVJ19X is not set -# CONFIG_NFC_PN553_DEVICES is not set -CONFIG_LDO_VIBRATOR=y -CONFIG_TOF_SENSOR=y -CONFIG_SENSORS_TCS3490=y -CONFIG_SIM_DETECT=y -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -# CONFIG_EEPROM_AT24 is not set -# CONFIG_EEPROM_AT25 is not set -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_EEPROM_MAX6875 is not set -# CONFIG_EEPROM_93CX6 is not set -# CONFIG_EEPROM_93XX46 is not set -# CONFIG_CB710_CORE is not set - -# -# Texas Instruments shared transport line discipline -# -# CONFIG_TI_ST is not set -# CONFIG_SENSORS_LIS3_I2C is not set - -# -# Altera FPGA firmware download module -# -# CONFIG_ALTERA_STAPL is not set -CONFIG_MSM_QDSP6V2_CODECS=y -CONFIG_MSM_ULTRASOUND=y - -# -# Intel MIC Bus Driver -# - -# -# SCIF Bus Driver -# - -# -# Intel MIC Host Driver -# - -# -# Intel MIC Card Driver -# - -# -# SCIF Driver -# - -# -# Intel MIC Coprocessor State Management (COSM) Drivers -# -# CONFIG_GENWQE is not set -# CONFIG_ECHO is not set -# CONFIG_CXL_BASE is not set -# CONFIG_CXL_KERNEL_API is not set -# CONFIG_CXL_EEH is not set -CONFIG_MMTUNER_MN8855x=y -# CONFIG_MMTUNER_DEBUG is not set - -# -# Sony Carillon NFC driver -# -CONFIG_NFC_CARILLON=y - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -# CONFIG_RAID_ATTRS is not set -CONFIG_SCSI=y -CONFIG_SCSI_DMA=y -# CONFIG_SCSI_NETLINK is not set -# CONFIG_SCSI_MQ_DEFAULT is not set -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=y -# CONFIG_CHR_DEV_ST is not set -# CONFIG_CHR_DEV_OSST is not set -# CONFIG_BLK_DEV_SR is not set -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y - -# -# SCSI Transports -# -# CONFIG_SCSI_SPI_ATTRS is not set -# CONFIG_SCSI_FC_ATTRS is not set -# CONFIG_SCSI_ISCSI_ATTRS is not set -# CONFIG_SCSI_SAS_ATTRS is not set -# CONFIG_SCSI_SAS_LIBSAS is not set -# CONFIG_SCSI_SRP_ATTRS is not set 
-CONFIG_SCSI_LOWLEVEL=y -# CONFIG_ISCSI_TCP is not set -# CONFIG_ISCSI_BOOT_SYSFS is not set -# CONFIG_SCSI_CXGB3_ISCSI is not set -# CONFIG_SCSI_CXGB4_ISCSI is not set -# CONFIG_SCSI_BNX2_ISCSI is not set -# CONFIG_BE2ISCSI is not set -# CONFIG_BLK_DEV_3W_XXXX_RAID is not set -# CONFIG_SCSI_HPSA is not set -# CONFIG_SCSI_3W_9XXX is not set -# CONFIG_SCSI_3W_SAS is not set -# CONFIG_SCSI_ACARD is not set -# CONFIG_SCSI_AACRAID is not set -# CONFIG_SCSI_AIC7XXX is not set -# CONFIG_SCSI_AIC79XX is not set -# CONFIG_SCSI_AIC94XX is not set -# CONFIG_SCSI_MVSAS is not set -# CONFIG_SCSI_MVUMI is not set -# CONFIG_SCSI_ADVANSYS is not set -# CONFIG_SCSI_ARCMSR is not set -# CONFIG_SCSI_ESAS2R is not set -# CONFIG_MEGARAID_NEWGEN is not set -# CONFIG_MEGARAID_LEGACY is not set -# CONFIG_MEGARAID_SAS is not set -# CONFIG_SCSI_MPT3SAS is not set -# CONFIG_SCSI_MPT2SAS is not set -CONFIG_SCSI_UFSHCD=y -# CONFIG_SCSI_UFSHCD_PCI is not set -CONFIG_SCSI_UFSHCD_PLATFORM=y -CONFIG_SCSI_UFS_QCOM=y -CONFIG_SCSI_UFS_QCOM_ICE=y -CONFIG_SCSI_UFS_RESTRICT_TX_LANES=y -# CONFIG_SCSI_UFSHCD_CMD_LOGGING is not set -# CONFIG_SCSI_HPTIOP is not set -# CONFIG_SCSI_SNIC is not set -# CONFIG_SCSI_DMX3191D is not set -# CONFIG_SCSI_FUTURE_DOMAIN is not set -# CONFIG_SCSI_IPS is not set -# CONFIG_SCSI_INITIO is not set -# CONFIG_SCSI_INIA100 is not set -# CONFIG_SCSI_STEX is not set -# CONFIG_SCSI_SYM53C8XX_2 is not set -# CONFIG_SCSI_QLOGIC_1280 is not set -# CONFIG_SCSI_QLA_ISCSI is not set -# CONFIG_SCSI_DC395x is not set -# CONFIG_SCSI_AM53C974 is not set -# CONFIG_SCSI_WD719X is not set -# CONFIG_SCSI_DEBUG is not set -# CONFIG_SCSI_PMCRAID is not set -# CONFIG_SCSI_PM8001 is not set -# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set -# CONFIG_SCSI_DH is not set -# CONFIG_SCSI_OSD_INITIATOR is not set -CONFIG_HAVE_PATA_PLATFORM=y -# CONFIG_ATA is not set -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_AUTODETECT=y -CONFIG_MD_LINEAR=y -# CONFIG_MD_RAID0 is not set -# CONFIG_MD_RAID1 is not set -# 
CONFIG_MD_RAID10 is not set -# CONFIG_MD_RAID456 is not set -# CONFIG_MD_MULTIPATH is not set -# CONFIG_MD_FAULTY is not set -# CONFIG_BCACHE is not set -CONFIG_BLK_DEV_DM_BUILTIN=y -CONFIG_BLK_DEV_DM=y -# CONFIG_DM_MQ_DEFAULT is not set -# CONFIG_DM_DEBUG is not set -CONFIG_DM_BUFIO=y -CONFIG_DM_CRYPT=y -CONFIG_DM_REQ_CRYPT=y -# CONFIG_DM_SNAPSHOT is not set -# CONFIG_DM_THIN_PROVISIONING is not set -# CONFIG_DM_CACHE is not set -# CONFIG_DM_ERA is not set -# CONFIG_DM_MIRROR is not set -# CONFIG_DM_RAID is not set -# CONFIG_DM_ZERO is not set -# CONFIG_DM_MULTIPATH is not set -# CONFIG_DM_DELAY is not set -CONFIG_DM_UEVENT=y -# CONFIG_DM_FLAKEY is not set -CONFIG_DM_VERITY=y -CONFIG_DM_VERITY_FEC=y -# CONFIG_DM_SWITCH is not set -# CONFIG_DM_LOG_WRITES is not set -# CONFIG_DM_VERITY_AVB is not set -CONFIG_DM_ANDROID_VERITY=y -# CONFIG_DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED is not set -CONFIG_PANIC_ON_DM_VERITY_ERRORS=y -# CONFIG_TARGET_CORE is not set -# CONFIG_FUSION is not set - -# -# IEEE 1394 (FireWire) support -# -# CONFIG_FIREWIRE is not set -# CONFIG_FIREWIRE_NOSY is not set -CONFIG_NETDEVICES=y -CONFIG_MII=y -CONFIG_NET_CORE=y -CONFIG_BONDING=y -CONFIG_DUMMY=y -# CONFIG_EQUALIZER is not set -# CONFIG_NET_FC is not set -# CONFIG_IFB is not set -# CONFIG_NET_TEAM is not set -# CONFIG_MACVLAN is not set -# CONFIG_IPVLAN is not set -# CONFIG_VXLAN is not set -# CONFIG_GENEVE is not set -# CONFIG_NETCONSOLE is not set -# CONFIG_NETPOLL is not set -# CONFIG_NET_POLL_CONTROLLER is not set -CONFIG_TUN=y -# CONFIG_TUN_VNET_CROSS_LE is not set -# CONFIG_VETH is not set -# CONFIG_NLMON is not set -# CONFIG_ARCNET is not set - -# -# CAIF transport drivers -# - -# -# Distributed Switch Architecture drivers -# -# CONFIG_NET_DSA_MV88E6XXX is not set -# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set -CONFIG_ETHERNET=y -CONFIG_NET_VENDOR_3COM=y -# CONFIG_VORTEX is not set -# CONFIG_TYPHOON is not set -CONFIG_NET_VENDOR_ADAPTEC=y -# CONFIG_ADAPTEC_STARFIRE is not set 
-CONFIG_NET_VENDOR_AGERE=y -# CONFIG_ET131X is not set -CONFIG_NET_VENDOR_ALTEON=y -# CONFIG_ACENIC is not set -# CONFIG_ALTERA_TSE is not set -CONFIG_NET_VENDOR_AMD=y -# CONFIG_AMD8111_ETH is not set -# CONFIG_PCNET32 is not set -# CONFIG_AMD_XGBE is not set -CONFIG_NET_VENDOR_ARC=y -# CONFIG_ARC_EMAC is not set -# CONFIG_EMAC_ROCKCHIP is not set -CONFIG_NET_VENDOR_ATHEROS=y -# CONFIG_ATL2 is not set -# CONFIG_ATL1 is not set -# CONFIG_ATL1E is not set -# CONFIG_ATL1C is not set -# CONFIG_ALX is not set -# CONFIG_NET_VENDOR_AURORA is not set -CONFIG_NET_CADENCE=y -# CONFIG_MACB is not set -CONFIG_NET_VENDOR_BROADCOM=y -# CONFIG_B44 is not set -# CONFIG_BCMGENET is not set -# CONFIG_BNX2 is not set -# CONFIG_CNIC is not set -# CONFIG_TIGON3 is not set -# CONFIG_BNX2X is not set -# CONFIG_SYSTEMPORT is not set -# CONFIG_BNXT is not set -CONFIG_NET_VENDOR_BROCADE=y -# CONFIG_BNA is not set -CONFIG_NET_VENDOR_CAVIUM=y -# CONFIG_THUNDER_NIC_PF is not set -# CONFIG_THUNDER_NIC_VF is not set -# CONFIG_THUNDER_NIC_BGX is not set -# CONFIG_LIQUIDIO is not set -CONFIG_NET_VENDOR_CHELSIO=y -# CONFIG_CHELSIO_T1 is not set -# CONFIG_CHELSIO_T3 is not set -# CONFIG_CHELSIO_T4 is not set -# CONFIG_CHELSIO_T4VF is not set -CONFIG_NET_VENDOR_CISCO=y -# CONFIG_ENIC is not set -# CONFIG_DNET is not set -CONFIG_NET_VENDOR_DEC=y -# CONFIG_NET_TULIP is not set -CONFIG_NET_VENDOR_DLINK=y -# CONFIG_DL2K is not set -# CONFIG_SUNDANCE is not set -CONFIG_NET_VENDOR_EMULEX=y -# CONFIG_BE2NET is not set -CONFIG_NET_VENDOR_EZCHIP=y -# CONFIG_EZCHIP_NPS_MANAGEMENT_ENET is not set -CONFIG_NET_VENDOR_EXAR=y -# CONFIG_S2IO is not set -# CONFIG_VXGE is not set -CONFIG_NET_VENDOR_HISILICON=y -# CONFIG_HIX5HD2_GMAC is not set -# CONFIG_HIP04_ETH is not set -# CONFIG_HNS is not set -# CONFIG_HNS_DSAF is not set -# CONFIG_HNS_ENET is not set -CONFIG_NET_VENDOR_HP=y -# CONFIG_HP100 is not set -CONFIG_NET_VENDOR_INTEL=y -# CONFIG_E100 is not set -# CONFIG_E1000 is not set -# CONFIG_E1000E is not set -# 
CONFIG_IGB is not set -# CONFIG_IGBVF is not set -# CONFIG_IXGB is not set -# CONFIG_IXGBE is not set -# CONFIG_IXGBEVF is not set -# CONFIG_I40E is not set -# CONFIG_I40EVF is not set -# CONFIG_FM10K is not set -CONFIG_NET_VENDOR_I825XX=y -# CONFIG_JME is not set -CONFIG_NET_VENDOR_MARVELL=y -# CONFIG_MVMDIO is not set -# CONFIG_SKGE is not set -CONFIG_SKY2=y -# CONFIG_SKY2_DEBUG is not set -CONFIG_NET_VENDOR_MELLANOX=y -# CONFIG_MLX4_EN is not set -# CONFIG_MLX4_CORE is not set -# CONFIG_MLX5_CORE is not set -# CONFIG_MLXSW_CORE is not set -CONFIG_NET_VENDOR_MICREL=y -# CONFIG_KS8842 is not set -# CONFIG_KS8851 is not set -# CONFIG_KS8851_MLL is not set -# CONFIG_KSZ884X_PCI is not set -CONFIG_NET_VENDOR_MICROCHIP=y -# CONFIG_ENC28J60 is not set -# CONFIG_ENCX24J600 is not set -CONFIG_MSM_RMNET_MHI=y -# CONFIG_ECM_IPA is not set -CONFIG_RNDIS_IPA=y -CONFIG_NET_VENDOR_MYRI=y -# CONFIG_MYRI10GE is not set -# CONFIG_FEALNX is not set -CONFIG_NET_VENDOR_NATSEMI=y -# CONFIG_NATSEMI is not set -# CONFIG_NS83820 is not set -CONFIG_NET_VENDOR_8390=y -# CONFIG_NE2K_PCI is not set -CONFIG_NET_VENDOR_NVIDIA=y -# CONFIG_FORCEDETH is not set -CONFIG_NET_VENDOR_OKI=y -# CONFIG_ETHOC is not set -CONFIG_NET_PACKET_ENGINE=y -# CONFIG_HAMACHI is not set -# CONFIG_YELLOWFIN is not set -CONFIG_NET_VENDOR_QLOGIC=y -# CONFIG_QLA3XXX is not set -# CONFIG_QLCNIC is not set -# CONFIG_QLGE is not set -# CONFIG_NETXEN_NIC is not set -# CONFIG_QED is not set -CONFIG_NET_VENDOR_QUALCOMM=y -# CONFIG_QCA7000 is not set -CONFIG_NET_VENDOR_REALTEK=y -# CONFIG_8139CP is not set -# CONFIG_8139TOO is not set -# CONFIG_R8169 is not set -CONFIG_NET_VENDOR_RENESAS=y -CONFIG_NET_VENDOR_RDC=y -# CONFIG_R6040 is not set -CONFIG_NET_VENDOR_ROCKER=y -CONFIG_NET_VENDOR_SAMSUNG=y -# CONFIG_SXGBE_ETH is not set -CONFIG_NET_VENDOR_SEEQ=y -CONFIG_NET_VENDOR_SILAN=y -# CONFIG_SC92031 is not set -CONFIG_NET_VENDOR_SIS=y -# CONFIG_SIS900 is not set -# CONFIG_SIS190 is not set -# CONFIG_SFC is not set 
-CONFIG_NET_VENDOR_SMSC=y -# CONFIG_SMC91X is not set -# CONFIG_EPIC100 is not set -CONFIG_SMSC911X=y -# CONFIG_SMSC911X_ARCH_HOOKS is not set -# CONFIG_SMSC9420 is not set -CONFIG_NET_VENDOR_STMICRO=y -# CONFIG_STMMAC_ETH is not set -CONFIG_NET_VENDOR_SUN=y -# CONFIG_HAPPYMEAL is not set -# CONFIG_SUNGEM is not set -# CONFIG_CASSINI is not set -# CONFIG_NIU is not set -CONFIG_NET_VENDOR_SYNOPSYS=y -# CONFIG_SYNOPSYS_DWC_ETH_QOS is not set -CONFIG_NET_VENDOR_TEHUTI=y -# CONFIG_TEHUTI is not set -CONFIG_NET_VENDOR_TI=y -# CONFIG_TI_CPSW_ALE is not set -# CONFIG_TLAN is not set -CONFIG_NET_VENDOR_VIA=y -# CONFIG_VIA_RHINE is not set -# CONFIG_VIA_VELOCITY is not set -CONFIG_NET_VENDOR_WIZNET=y -# CONFIG_WIZNET_W5100 is not set -# CONFIG_WIZNET_W5300 is not set -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set -# CONFIG_NET_SB1000 is not set -CONFIG_PHYLIB=y - -# -# MII PHY device drivers -# -# CONFIG_AQUANTIA_PHY is not set -# CONFIG_AT803X_PHY is not set -# CONFIG_AMD_PHY is not set -# CONFIG_MARVELL_PHY is not set -# CONFIG_DAVICOM_PHY is not set -# CONFIG_QSEMI_PHY is not set -# CONFIG_LXT_PHY is not set -# CONFIG_CICADA_PHY is not set -# CONFIG_VITESSE_PHY is not set -# CONFIG_TERANETICS_PHY is not set -# CONFIG_SMSC_PHY is not set -# CONFIG_BROADCOM_PHY is not set -# CONFIG_BCM7XXX_PHY is not set -# CONFIG_BCM87XX_PHY is not set -# CONFIG_ICPLUS_PHY is not set -# CONFIG_REALTEK_PHY is not set -# CONFIG_NATIONAL_PHY is not set -# CONFIG_STE10XP is not set -# CONFIG_LSI_ET1011C_PHY is not set -# CONFIG_MICREL_PHY is not set -# CONFIG_DP83848_PHY is not set -# CONFIG_DP83867_PHY is not set -# CONFIG_MICROCHIP_PHY is not set -# CONFIG_FIXED_PHY is not set -# CONFIG_MDIO_BITBANG is not set -# CONFIG_MDIO_OCTEON is not set -# CONFIG_MDIO_BUS_MUX_GPIO is not set -# CONFIG_MDIO_BUS_MUX_MMIOREG is not set -# CONFIG_MDIO_BCM_UNIMAC is not set -# CONFIG_MICREL_KS8995MA is not set -CONFIG_PPP=y -CONFIG_PPP_BSDCOMP=y -CONFIG_PPP_DEFLATE=y -CONFIG_PPP_FILTER=y 
-CONFIG_PPP_MPPE=y -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOE=y -CONFIG_PPPOL2TP=y -CONFIG_PPPOLAC=y -CONFIG_PPPOPNS=y -CONFIG_PPP_ASYNC=y -CONFIG_PPP_SYNC_TTY=y -# CONFIG_SLIP is not set -CONFIG_SLHC=y -# CONFIG_USB_NET_DRIVERS is not set -CONFIG_WLAN=y -# CONFIG_ATMEL is not set -# CONFIG_PRISM54 is not set -# CONFIG_USB_ZD1201 is not set -# CONFIG_USB_NET_RNDIS_WLAN is not set -# CONFIG_VIRT_WIFI is not set -# CONFIG_WIFI_CONTROL_FUNC is not set -# CONFIG_WCNSS_CORE is not set -CONFIG_WCNSS_MEM_PRE_ALLOC=y -# CONFIG_CNSS_CRYPTO is not set -# CONFIG_CNSS_QCA6290 is not set -CONFIG_ATH_CARDS=y -# CONFIG_ATH_DEBUG is not set -# CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS is not set -# CONFIG_ATH5K_PCI is not set -# CONFIG_ATH6KL is not set -# CONFIG_WIL6210 is not set -# CONFIG_BRCMFMAC is not set -# CONFIG_HOSTAP is not set -# CONFIG_IPW2100 is not set -# CONFIG_IPW2200 is not set -# CONFIG_LIBERTAS is not set -# CONFIG_HERMES is not set -# CONFIG_WL_MEDIATEK is not set -# CONFIG_WL_TI is not set -# CONFIG_MWIFIEX is not set -# CONFIG_CNSS is not set -# CONFIG_CLD_DEBUG is not set -# CONFIG_CLD_HL_SDIO_CORE is not set -CONFIG_CLD_LL_CORE=y -# CONFIG_CNSS2 is not set -CONFIG_CNSS_GENL=y -CONFIG_CNSS_UTILS=y - -# -# Enable WiMAX (Networking options) to see the WiMAX drivers -# -# CONFIG_WAN is not set -# CONFIG_VMXNET3 is not set -# CONFIG_FUJITSU_ES is not set -# CONFIG_RMNET is not set -# CONFIG_ISDN is not set -# CONFIG_NVM is not set - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_LEDS=y -# CONFIG_INPUT_FF_MEMLESS is not set -# CONFIG_INPUT_POLLDEV is not set -# CONFIG_INPUT_SPARSEKMAP is not set -# CONFIG_INPUT_MATRIXKMAP is not set - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -CONFIG_INPUT_MOUSEDEV_PSAUX=y -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -# CONFIG_INPUT_JOYDEV is not set -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set -CONFIG_INPUT_KEYRESET=y -CONFIG_INPUT_KEYCOMBO=y - -# -# Input Device Drivers -# 
-CONFIG_INPUT_KEYBOARD=y -# CONFIG_KEYBOARD_ADP5588 is not set -# CONFIG_KEYBOARD_ADP5589 is not set -CONFIG_KEYBOARD_ATKBD=y -# CONFIG_KEYBOARD_QT1070 is not set -# CONFIG_KEYBOARD_QT2160 is not set -# CONFIG_KEYBOARD_LKKBD is not set -CONFIG_KEYBOARD_GPIO=y -# CONFIG_KEYBOARD_GPIO_POLLED is not set -# CONFIG_KEYBOARD_TCA6416 is not set -# CONFIG_KEYBOARD_TCA8418 is not set -# CONFIG_KEYBOARD_MATRIX is not set -# CONFIG_KEYBOARD_LM8323 is not set -# CONFIG_KEYBOARD_LM8333 is not set -# CONFIG_KEYBOARD_MAX7359 is not set -# CONFIG_KEYBOARD_MCS is not set -# CONFIG_KEYBOARD_MPR121 is not set -# CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# CONFIG_KEYBOARD_SAMSUNG is not set -# CONFIG_KEYBOARD_STOWAWAY is not set -# CONFIG_KEYBOARD_SUNKBD is not set -# CONFIG_KEYBOARD_OMAP4 is not set -# CONFIG_KEYBOARD_XTKBD is not set -# CONFIG_KEYBOARD_CAP11XX is not set -# CONFIG_KEYBOARD_BCM is not set -# CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_JOYSTICK=y -# CONFIG_JOYSTICK_ANALOG is not set -# CONFIG_JOYSTICK_A3D is not set -# CONFIG_JOYSTICK_ADI is not set -# CONFIG_JOYSTICK_COBRA is not set -# CONFIG_JOYSTICK_GF2K is not set -# CONFIG_JOYSTICK_GRIP is not set -# CONFIG_JOYSTICK_GRIP_MP is not set -# CONFIG_JOYSTICK_GUILLEMOT is not set -# CONFIG_JOYSTICK_INTERACT is not set -# CONFIG_JOYSTICK_SIDEWINDER is not set -# CONFIG_JOYSTICK_TMDC is not set -# CONFIG_JOYSTICK_IFORCE is not set -# CONFIG_JOYSTICK_WARRIOR is not set -# CONFIG_JOYSTICK_MAGELLAN is not set -# CONFIG_JOYSTICK_SPACEORB is not set -# CONFIG_JOYSTICK_SPACEBALL is not set -# CONFIG_JOYSTICK_STINGER is not set -# CONFIG_JOYSTICK_TWIDJOY is not set -# CONFIG_JOYSTICK_ZHENHUA is not set -# CONFIG_JOYSTICK_AS5011 is not set -# CONFIG_JOYSTICK_JOYDUMP is not set -CONFIG_JOYSTICK_XPAD=y -# CONFIG_JOYSTICK_XPAD_FF is not set -# CONFIG_JOYSTICK_XPAD_LEDS is not set -# CONFIG_INPUT_TABLET is not set -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_v21=y 
-CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_I2C_v21=y -# CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_SPI_v21 is not set -CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y -CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y -CONFIG_OF_TOUCHSCREEN=y -# CONFIG_TOUCHSCREEN_ADS7846 is not set -# CONFIG_TOUCHSCREEN_AD7877 is not set -# CONFIG_TOUCHSCREEN_AD7879 is not set -# CONFIG_TOUCHSCREEN_AR1021_I2C is not set -# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set -# CONFIG_TOUCHSCREEN_ATMEL_MAXTOUCH_TS is not set -# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set -# CONFIG_TOUCHSCREEN_BU21013 is not set -# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set -# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set -# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set -# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set -# CONFIG_TOUCHSCREEN_DYNAPRO is not set -# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set -# CONFIG_TOUCHSCREEN_EETI is not set -# CONFIG_TOUCHSCREEN_EGALAX is not set -# CONFIG_TOUCHSCREEN_FT6236 is not set -# CONFIG_TOUCHSCREEN_FUJITSU is not set -# CONFIG_TOUCHSCREEN_GOODIX is not set -# CONFIG_TOUCHSCREEN_ILI210X is not set -# CONFIG_TOUCHSCREEN_GUNZE is not set -# CONFIG_TOUCHSCREEN_ELAN is not set -# CONFIG_TOUCHSCREEN_ELO is not set -# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set -# CONFIG_TOUCHSCREEN_WACOM_I2C is not set -# CONFIG_TOUCHSCREEN_MAX11801 is not set -# CONFIG_TOUCHSCREEN_MCS5000 is not set -# CONFIG_TOUCHSCREEN_MMS114 is not set -# CONFIG_TOUCHSCREEN_MTOUCH is not set -# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set -# CONFIG_TOUCHSCREEN_INEXIO is not set -# CONFIG_TOUCHSCREEN_MK712 is not set -# CONFIG_TOUCHSCREEN_PENMOUNT is not set -# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set -CONFIG_TOUCHSCREEN_CLEARPAD=y -CONFIG_TOUCHSCREEN_CLEARPAD_I2C=y -CONFIG_TOUCHSCREEN_CLEARPAD_RMI_DEV=y -# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set -# CONFIG_TOUCHSCREEN_TOUCHWIN is not set -# CONFIG_TOUCHSCREEN_PIXCIR is not set -# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set -# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set -# 
CONFIG_TOUCHSCREEN_TOUCHIT213 is not set -# CONFIG_TOUCHSCREEN_TSC_SERIO is not set -# CONFIG_TOUCHSCREEN_TSC2004 is not set -# CONFIG_TOUCHSCREEN_TSC2005 is not set -# CONFIG_TOUCHSCREEN_TSC2007 is not set -# CONFIG_TOUCHSCREEN_ST1232 is not set -# CONFIG_TOUCHSCREEN_SUR40 is not set -# CONFIG_TOUCHSCREEN_SX8654 is not set -# CONFIG_TOUCHSCREEN_TPS6507X is not set -# CONFIG_TOUCHSCREEN_ZFORCE is not set -# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set -# CONFIG_TOUCHSCREEN_MAXIM_STI is not set -CONFIG_SECURE_TOUCH=y -# CONFIG_TOUCHSCREEN_GEN_VKEYS is not set -# CONFIG_TOUCHSCREEN_FT5X06 is not set -# CONFIG_TOUCHSCREEN_IT7260_I2C is not set -CONFIG_TOUCHSCREEN_ST=y -CONFIG_TOUCHSCREEN_ST_I2C=y -CONFIG_INPUT_MISC=y -# CONFIG_INPUT_AD714X is not set -# CONFIG_INPUT_BMA150 is not set -# CONFIG_INPUT_E3X0_BUTTON is not set -CONFIG_INPUT_BU520X1NVX=y -# CONFIG_INPUT_HBTP_INPUT is not set -# CONFIG_INPUT_PM8941_PWRKEY is not set -CONFIG_INPUT_QPNP_POWER_ON=y -# CONFIG_INPUT_MMA8450 is not set -# CONFIG_INPUT_MPU3050 is not set -# CONFIG_INPUT_GP2A is not set -# CONFIG_INPUT_GPIO_BEEPER is not set -# CONFIG_INPUT_GPIO_TILT_POLLED is not set -# CONFIG_INPUT_ATI_REMOTE2 is not set -# CONFIG_INPUT_KEYCHORD is not set -# CONFIG_INPUT_KEYSPAN_REMOTE is not set -# CONFIG_INPUT_KXTJ9 is not set -# CONFIG_INPUT_POWERMATE is not set -# CONFIG_INPUT_YEALINK is not set -# CONFIG_INPUT_CM109 is not set -# CONFIG_INPUT_REGULATOR_HAPTIC is not set -CONFIG_INPUT_UINPUT=y -# CONFIG_INPUT_GPIO is not set -# CONFIG_INPUT_PCF8574 is not set -# CONFIG_INPUT_PWM_BEEPER is not set -# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set -# CONFIG_INPUT_ADXL34X is not set -# CONFIG_INPUT_IMS_PCU is not set -# CONFIG_INPUT_CMA3000 is not set -# CONFIG_INPUT_SOC_BUTTON_ARRAY is not set -# CONFIG_INPUT_DRV260X_HAPTICS is not set -# CONFIG_INPUT_DRV2665_HAPTICS is not set -# CONFIG_INPUT_DRV2667_HAPTICS is not set -# CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH is not set -CONFIG_INPUT_STMVL53L0=y 
-CONFIG_FPC1145_PLATFORM=y -# CONFIG_INPUT_ADUX1050 is not set -# CONFIG_BOSCH_DRIVER_LOG_FUNC is not set -# CONFIG_SENSORS_BMA2X2 is not set -# CONFIG_SENSORS_BMG is not set -# CONFIG_SENSORS_YAS537 is not set -# CONFIG_SENSORS_BMM050 is not set -# CONFIG_SENSORS_AKM09911 is not set -# CONFIG_SENSORS_AKM09912 is not set -# CONFIG_SENSORS_BMA420 is not set -# CONFIG_SENSORS_BMA421 is not set -# CONFIG_SENSORS_BMA422 is not set -# CONFIG_SENSORS_BMA455 is not set - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -# CONFIG_SERIO_SERPORT is not set -# CONFIG_SERIO_AMBAKMI is not set -# CONFIG_SERIO_PCIPS2 is not set -CONFIG_SERIO_LIBPS2=y -# CONFIG_SERIO_RAW is not set -# CONFIG_SERIO_ALTERA_PS2 is not set -# CONFIG_SERIO_PS2MULT is not set -# CONFIG_SERIO_ARC_PS2 is not set -# CONFIG_SERIO_APBPS2 is not set -# CONFIG_USERIO is not set -# CONFIG_GAMEPORT is not set - -# -# Character devices -# -CONFIG_TTY=y -# CONFIG_VT is not set -CONFIG_UNIX98_PTYS=y -# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set -# CONFIG_LEGACY_PTYS is not set -# CONFIG_SERIAL_NONSTANDARD is not set -# CONFIG_NOZOMI is not set -# CONFIG_N_GSM is not set -# CONFIG_TRACE_SINK is not set -CONFIG_LDISC_AUTOLOAD=y -# CONFIG_DEVMEM is not set -# CONFIG_DEVKMEM is not set - -# -# Serial drivers -# -CONFIG_SERIAL_EARLYCON=y -# CONFIG_SERIAL_8250 is not set - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_AMBA_PL010 is not set -# CONFIG_SERIAL_AMBA_PL011 is not set -# CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set -# CONFIG_SERIAL_MAX3100 is not set -# CONFIG_SERIAL_MAX310X is not set -# CONFIG_SERIAL_UARTLITE is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -# CONFIG_SERIAL_JSM is not set -CONFIG_SERIAL_MSM=y -CONFIG_SERIAL_MSM_CONSOLE=y -CONFIG_SERIAL_MSM_HS=y -# CONFIG_SERIAL_SCCNXP is not set -# CONFIG_SERIAL_SC16IS7XX is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -# CONFIG_SERIAL_IFX6X60 is not set -CONFIG_SERIAL_MSM_SMD=y -# 
CONFIG_SERIAL_XILINX_PS_UART is not set -# CONFIG_SERIAL_ARC is not set -# CONFIG_SERIAL_RP2 is not set -# CONFIG_SERIAL_FSL_LPUART is not set -# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set - -# -# Diag Support -# -CONFIG_DIAG_CHAR=y - -# -# DIAG traffic over USB -# -CONFIG_DIAG_OVER_USB=y - -# -# HSIC/SMUX support for DIAG -# -CONFIG_DIAGFWD_BRIDGE_CODE=y -# CONFIG_TTY_PRINTK is not set -# CONFIG_HVC_DCC is not set -# CONFIG_IPMI_HANDLER is not set -CONFIG_HW_RANDOM=y -# CONFIG_HW_RANDOM_TIMERIOMEM is not set -CONFIG_HW_RANDOM_MSM_LEGACY=y -# CONFIG_HW_RANDOM_MSM is not set -# CONFIG_APPLICOM is not set - -# -# PCMCIA character devices -# -# CONFIG_RAW_DRIVER is not set -# CONFIG_HPET is not set -# CONFIG_TCG_TPM is not set -# CONFIG_DEVPORT is not set -# CONFIG_MSM_SMD_PKT is not set -# CONFIG_XILLYBUS is not set -CONFIG_MSM_ADSPRPC=y -# CONFIG_MSM_RDBG is not set - -# -# I2C support -# -CONFIG_I2C=y -CONFIG_ACPI_I2C_OPREGION=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MUX=y - -# -# Multiplexer I2C Chip support -# -# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set -# CONFIG_I2C_MUX_GPIO is not set -# CONFIG_I2C_MUX_PCA9541 is not set -# CONFIG_I2C_MUX_PCA954x is not set -# CONFIG_I2C_MUX_PINCTRL is not set -# CONFIG_I2C_MUX_REG is not set -CONFIG_I2C_HELPER_AUTO=y - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set -# CONFIG_I2C_AMD756 is not set -# CONFIG_I2C_AMD8111 is not set -# CONFIG_I2C_I801 is not set -# CONFIG_I2C_ISCH is not set -# CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_NFORCE2 is not set -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -# CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_VIA is not set -# CONFIG_I2C_VIAPRO is not set - -# -# ACPI drivers -# -# CONFIG_I2C_SCMI is not set - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_CADENCE is not set 
-# CONFIG_I2C_CBUS_GPIO is not set -# CONFIG_I2C_DESIGNWARE_PCI is not set -# CONFIG_I2C_EMEV2 is not set -# CONFIG_I2C_GPIO is not set -# CONFIG_I2C_NOMADIK is not set -# CONFIG_I2C_OCORES is not set -# CONFIG_I2C_PCA_PLATFORM is not set -# CONFIG_I2C_PXA_PCI is not set -# CONFIG_I2C_QUP is not set -# CONFIG_I2C_SIMTEC is not set -# CONFIG_I2C_XILINX is not set - -# -# External I2C/SMBus adapter drivers -# -# CONFIG_I2C_DIOLAN_U2C is not set -# CONFIG_I2C_PARPORT_LIGHT is not set -# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -# CONFIG_I2C_TAOS_EVM is not set -# CONFIG_I2C_TINY_USB is not set - -# -# Other I2C/SMBus bus drivers -# -CONFIG_I2C_MSM_V2=y -# CONFIG_I2C_STUB is not set -# CONFIG_I2C_SLAVE is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -CONFIG_SLIMBUS=y -# CONFIG_SLIMBUS_MSM_CTRL is not set -CONFIG_SLIMBUS_MSM_NGD=y -CONFIG_SOUNDWIRE=y -CONFIG_SOUNDWIRE_WCD_CTRL=y -CONFIG_SPI=y -# CONFIG_SPI_DEBUG is not set -CONFIG_SPI_MASTER=y - -# -# SPI Master Controller Drivers -# -# CONFIG_SPI_ALTERA is not set -# CONFIG_SPI_BITBANG is not set -# CONFIG_SPI_CADENCE is not set -# CONFIG_SPI_GPIO is not set -# CONFIG_SPI_FSL_SPI is not set -# CONFIG_SPI_OC_TINY is not set -# CONFIG_SPI_PL022 is not set -# CONFIG_SPI_PXA2XX is not set -# CONFIG_SPI_PXA2XX_PCI is not set -# CONFIG_SPI_ROCKCHIP is not set -CONFIG_SPI_QUP=y -# CONFIG_SPI_SC18IS602 is not set -# CONFIG_SPI_XCOMM is not set -# CONFIG_SPI_XILINX is not set -# CONFIG_SPI_ZYNQMP_GQSPI is not set -# CONFIG_SPI_DESIGNWARE is not set - -# -# SPI Protocol Masters -# -CONFIG_SPI_SPIDEV=y -# CONFIG_SPI_TLE62X0 is not set -# CONFIG_SPI_SLAVE is not set -CONFIG_SPI_DYNAMIC=y -CONFIG_SPMI=y -CONFIG_SPMI_MSM_PMIC_ARB=y -CONFIG_VIRTSPMI_MSM_PMIC_ARB=y -# CONFIG_HSI is not set - -# -# PPS support -# -# CONFIG_PPS is not set - -# -# PPS generators support -# - -# -# PTP clock support -# -# CONFIG_PTP_1588_CLOCK is not set - -# -# Enable PHYLIB and 
NETWORK_PHY_TIMESTAMPING to see the additional clocks. -# -CONFIG_PINCTRL=y - -# -# Pin controllers -# -CONFIG_PINMUX=y -CONFIG_PINCONF=y -CONFIG_GENERIC_PINCONF=y -# CONFIG_DEBUG_PINCTRL is not set -# CONFIG_PINCTRL_AMD is not set -# CONFIG_PINCTRL_SINGLE is not set -# CONFIG_PINCTRL_BAYTRAIL is not set -# CONFIG_PINCTRL_CHERRYVIEW is not set -# CONFIG_PINCTRL_BROXTON is not set -# CONFIG_PINCTRL_SUNRISEPOINT is not set -CONFIG_PINCTRL_MSM=y -# CONFIG_PINCTRL_APQ8064 is not set -# CONFIG_PINCTRL_APQ8084 is not set -# CONFIG_PINCTRL_IPQ8064 is not set -# CONFIG_PINCTRL_MSM8660 is not set -# CONFIG_PINCTRL_MSM8960 is not set -# CONFIG_PINCTRL_MSM8X74 is not set -# CONFIG_PINCTRL_MSM8916 is not set -# CONFIG_PINCTRL_QDF2XXX is not set -# CONFIG_PINCTRL_QCOM_SPMI_PMIC is not set -# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set -CONFIG_PINCTRL_MSM8998=y -# CONFIG_PINCTRL_MSM8996 is not set -# CONFIG_PINCTRL_SDM660 is not set -CONFIG_PINCTRL_WCD=y -# CONFIG_PINCTRL_LPI is not set -CONFIG_PINCTRL_SOMC=y -CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y -CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -CONFIG_GPIOLIB=y -CONFIG_GPIO_DEVRES=y -CONFIG_OF_GPIO=y -CONFIG_GPIO_ACPI=y -CONFIG_GPIOLIB_IRQCHIP=y -# CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_SYSFS=y - -# -# Memory mapped GPIO drivers -# -# CONFIG_GPIO_74XX_MMIO is not set -# CONFIG_GPIO_ALTERA is not set -# CONFIG_GPIO_AMDPT is not set -# CONFIG_GPIO_DWAPB is not set -# CONFIG_GPIO_GENERIC_PLATFORM is not set -# CONFIG_GPIO_GRGPIO is not set -# CONFIG_GPIO_PL061 is not set -CONFIG_GPIO_QPNP_PIN=y -# CONFIG_GPIO_QPNP_PIN_DEBUG is not set -# CONFIG_GPIO_SYSCON is not set -# CONFIG_GPIO_VX855 is not set -# CONFIG_GPIO_XGENE is not set -# CONFIG_GPIO_XILINX is not set -# CONFIG_GPIO_ZX is not set - -# -# I2C GPIO expanders -# -# CONFIG_GPIO_ADP5588 is not set -# CONFIG_GPIO_ADNP is not set -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not set -# CONFIG_GPIO_PCF857X is not set -# CONFIG_GPIO_SX150X is not 
set - -# -# MFD GPIO expanders -# - -# -# PCI GPIO expanders -# -# CONFIG_GPIO_AMD8111 is not set -# CONFIG_GPIO_BT8XX is not set -# CONFIG_GPIO_ML_IOH is not set -# CONFIG_GPIO_RDC321X is not set - -# -# SPI GPIO expanders -# -# CONFIG_GPIO_74X164 is not set -# CONFIG_GPIO_MAX7301 is not set -# CONFIG_GPIO_MC33880 is not set - -# -# SPI or I2C GPIO expanders -# -# CONFIG_GPIO_MCP23S08 is not set - -# -# USB GPIO expanders -# -# CONFIG_W1 is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -# CONFIG_PDA_POWER is not set -# CONFIG_GENERIC_ADC_BATTERY is not set -# CONFIG_TEST_POWER is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2781 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_SBS is not set -# CONFIG_BATTERY_BQ27XXX is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_ISP1704 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_GPIO is not set -# CONFIG_CHARGER_MANAGER is not set -# CONFIG_CHARGER_QCOM_SMBB is not set -# CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_BQ24190 is not set -# CONFIG_CHARGER_BQ24257 is not set -# CONFIG_CHARGER_BQ24735 is not set -# CONFIG_CHARGER_BQ25890 is not set -# CONFIG_CHARGER_SMB347 is not set -# CONFIG_BATTERY_BQ28400 is not set -# CONFIG_BATTERY_GAUGE_LTC2941 is not set -# CONFIG_CHARGER_RT9455 is not set -CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_GPIO is not set -# CONFIG_POWER_RESET_GPIO_RESTART is not set -# CONFIG_POWER_RESET_LTC2952 is not set -CONFIG_POWER_RESET_QCOM=y -CONFIG_QCOM_DLOAD_MODE=y -CONFIG_QCOM_PRESERVE_MEM=y -# CONFIG_POWER_RESET_RESTART is not set -CONFIG_POWER_RESET_XGENE=y -CONFIG_POWER_RESET_SYSCON=y -# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set -# CONFIG_SYSCON_REBOOT_MODE is not set - -# -# Qualcomm Technologies Inc Charger and Fuel Gauge support -# -# CONFIG_QPNP_SMBCHARGER is not set -# CONFIG_QPNP_FG is not set 
-CONFIG_QPNP_FG_GEN3=y -# CONFIG_SMB135X_CHARGER is not set -# CONFIG_SMB1351_USB_CHARGER is not set -CONFIG_MSM_BCL_CTL=y -CONFIG_MSM_BCL_PERIPHERAL_CTL=y -CONFIG_BATTERY_BCL=y -CONFIG_QPNP_SMB2=y -CONFIG_SMB138X_CHARGER=y -# CONFIG_QPNP_QNOVO is not set -CONFIG_QNS_SYSTEM=y -CONFIG_SOMC_CHARGER_EXTENSION=y -# CONFIG_POWER_AVS is not set -CONFIG_MSM_PM=y -CONFIG_APSS_CORE_EA=y -CONFIG_MSM_APM=y -CONFIG_MSM_IDLE_STATS=y -CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500 -CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT=2 -CONFIG_MSM_IDLE_STATS_BUCKET_COUNT=10 -CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET=1000000000 -CONFIG_HWMON=y -# CONFIG_HWMON_VID is not set -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -# CONFIG_SENSORS_AD7314 is not set -# CONFIG_SENSORS_AD7414 is not set -# CONFIG_SENSORS_AD7418 is not set -# CONFIG_SENSORS_ADM1021 is not set -# CONFIG_SENSORS_ADM1025 is not set -# CONFIG_SENSORS_ADM1026 is not set -# CONFIG_SENSORS_ADM1029 is not set -# CONFIG_SENSORS_ADM1031 is not set -# CONFIG_SENSORS_ADM9240 is not set -# CONFIG_SENSORS_ADT7310 is not set -# CONFIG_SENSORS_ADT7410 is not set -# CONFIG_SENSORS_ADT7411 is not set -# CONFIG_SENSORS_ADT7462 is not set -# CONFIG_SENSORS_ADT7470 is not set -# CONFIG_SENSORS_ADT7475 is not set -# CONFIG_SENSORS_ASC7621 is not set -# CONFIG_SENSORS_ATXP1 is not set -# CONFIG_SENSORS_DS620 is not set -# CONFIG_SENSORS_DS1621 is not set -# CONFIG_SENSORS_I5K_AMB is not set -# CONFIG_SENSORS_F71805F is not set -# CONFIG_SENSORS_F71882FG is not set -# CONFIG_SENSORS_F75375S is not set -# CONFIG_SENSORS_GL518SM is not set -# CONFIG_SENSORS_GL520SM is not set -# CONFIG_SENSORS_G760A is not set -# CONFIG_SENSORS_G762 is not set -# CONFIG_SENSORS_GPIO_FAN is not set -# CONFIG_SENSORS_HIH6130 is not set -# CONFIG_SENSORS_IIO_HWMON is not set -# CONFIG_SENSORS_IT87 is not set -# CONFIG_SENSORS_JC42 is not set -# CONFIG_SENSORS_POWR1220 is not set -# CONFIG_SENSORS_LINEAGE is not set -# CONFIG_SENSORS_LTC2945 is not set -# 
CONFIG_SENSORS_LTC4151 is not set -# CONFIG_SENSORS_LTC4215 is not set -# CONFIG_SENSORS_LTC4222 is not set -# CONFIG_SENSORS_LTC4245 is not set -# CONFIG_SENSORS_LTC4260 is not set -# CONFIG_SENSORS_LTC4261 is not set -# CONFIG_SENSORS_MAX1111 is not set -# CONFIG_SENSORS_MAX16065 is not set -# CONFIG_SENSORS_MAX1619 is not set -# CONFIG_SENSORS_MAX1668 is not set -# CONFIG_SENSORS_MAX197 is not set -# CONFIG_SENSORS_MAX6639 is not set -# CONFIG_SENSORS_MAX6642 is not set -# CONFIG_SENSORS_MAX6650 is not set -# CONFIG_SENSORS_MAX6697 is not set -# CONFIG_SENSORS_MAX31790 is not set -# CONFIG_SENSORS_HTU21 is not set -# CONFIG_SENSORS_MCP3021 is not set -# CONFIG_SENSORS_ADCXX is not set -# CONFIG_SENSORS_LM63 is not set -# CONFIG_SENSORS_LM70 is not set -# CONFIG_SENSORS_LM73 is not set -# CONFIG_SENSORS_LM75 is not set -# CONFIG_SENSORS_LM77 is not set -# CONFIG_SENSORS_LM78 is not set -# CONFIG_SENSORS_LM80 is not set -# CONFIG_SENSORS_LM83 is not set -# CONFIG_SENSORS_LM85 is not set -# CONFIG_SENSORS_LM87 is not set -# CONFIG_SENSORS_LM90 is not set -# CONFIG_SENSORS_LM92 is not set -# CONFIG_SENSORS_LM93 is not set -# CONFIG_SENSORS_LM95234 is not set -# CONFIG_SENSORS_LM95241 is not set -# CONFIG_SENSORS_LM95245 is not set -# CONFIG_SENSORS_PC87360 is not set -# CONFIG_SENSORS_PC87427 is not set -# CONFIG_SENSORS_NTC_THERMISTOR is not set -# CONFIG_SENSORS_NCT6683 is not set -# CONFIG_SENSORS_NCT6775 is not set -# CONFIG_SENSORS_NCT7802 is not set -# CONFIG_SENSORS_NCT7904 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_SENSORS_EPM_ADC is not set -CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y -# CONFIG_SENSORS_QPNP_ADC_CURRENT is not set -# CONFIG_PMBUS is not set -# CONFIG_SENSORS_PWM_FAN is not set -# CONFIG_SENSORS_SHT15 is not set -# CONFIG_SENSORS_SHT21 is not set -# CONFIG_SENSORS_SHTC1 is not set -# CONFIG_SENSORS_SIS5595 is not set -# CONFIG_SENSORS_DME1737 is not set -# CONFIG_SENSORS_EMC1403 is not set -# CONFIG_SENSORS_EMC2103 is not set -# 
CONFIG_SENSORS_EMC6W201 is not set -# CONFIG_SENSORS_SMSC47M1 is not set -# CONFIG_SENSORS_SMSC47M192 is not set -# CONFIG_SENSORS_SMSC47B397 is not set -# CONFIG_SENSORS_SCH56XX_COMMON is not set -# CONFIG_SENSORS_SMM665 is not set -# CONFIG_SENSORS_ADC128D818 is not set -# CONFIG_SENSORS_ADS1015 is not set -# CONFIG_SENSORS_ADS7828 is not set -# CONFIG_SENSORS_ADS7871 is not set -# CONFIG_SENSORS_AMC6821 is not set -# CONFIG_SENSORS_INA209 is not set -# CONFIG_SENSORS_INA2XX is not set -# CONFIG_SENSORS_TC74 is not set -# CONFIG_SENSORS_THMC50 is not set -# CONFIG_SENSORS_TMP102 is not set -# CONFIG_SENSORS_TMP103 is not set -# CONFIG_SENSORS_TMP401 is not set -# CONFIG_SENSORS_TMP421 is not set -# CONFIG_SENSORS_VIA686A is not set -# CONFIG_SENSORS_VT1211 is not set -# CONFIG_SENSORS_VT8231 is not set -# CONFIG_SENSORS_W83781D is not set -# CONFIG_SENSORS_W83791D is not set -# CONFIG_SENSORS_W83792D is not set -# CONFIG_SENSORS_W83793 is not set -# CONFIG_SENSORS_W83795 is not set -# CONFIG_SENSORS_W83L785TS is not set -# CONFIG_SENSORS_W83L786NG is not set -# CONFIG_SENSORS_W83627HF is not set -# CONFIG_SENSORS_W83627EHF is not set - -# -# ACPI drivers -# -# CONFIG_SENSORS_ACPI_POWER is not set -CONFIG_THERMAL=y -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_OF=y -CONFIG_THERMAL_WRITABLE_TRIPS=y -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set -# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set -# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set -# CONFIG_THERMAL_GOV_FAIR_SHARE is not set -CONFIG_THERMAL_GOV_STEP_WISE=y -# CONFIG_THERMAL_GOV_BANG_BANG is not set -# CONFIG_THERMAL_GOV_USER_SPACE is not set -# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set -CONFIG_CPU_THERMAL=y -# CONFIG_DEVFREQ_THERMAL is not set -# CONFIG_THERMAL_EMULATION is not set -CONFIG_LIMITS_MONITOR=y -CONFIG_LIMITS_LITE_HW=y -CONFIG_THERMAL_MONITOR=y -CONFIG_THERMAL_TSENS8974=y -# CONFIG_IMX_THERMAL is not set -CONFIG_THERMAL_QPNP=y 
-CONFIG_THERMAL_QPNP_ADC_TM=y -CONFIG_QCOM_THERMAL_LIMITS_DCVS=y -# CONFIG_QCOM_SPMI_TEMP_ALARM is not set -# CONFIG_WATCHDOG is not set -CONFIG_SSB_POSSIBLE=y - -# -# Sonics Silicon Backplane -# -# CONFIG_SSB is not set -CONFIG_BCMA_POSSIBLE=y - -# -# Broadcom specific AMBA -# -# CONFIG_BCMA is not set - -# -# Multifunction device drivers -# -CONFIG_MFD_CORE=y -# CONFIG_MFD_AS3711 is not set -# CONFIG_MFD_AS3722 is not set -# CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_AAT2870_CORE is not set -# CONFIG_MFD_ATMEL_FLEXCOM is not set -# CONFIG_MFD_ATMEL_HLCDC is not set -# CONFIG_MFD_BCM590XX is not set -# CONFIG_MFD_AXP20X is not set -# CONFIG_PMIC_DA903X is not set -# CONFIG_MFD_DA9052_SPI is not set -# CONFIG_MFD_DA9052_I2C is not set -# CONFIG_MFD_DA9055 is not set -# CONFIG_MFD_DA9062 is not set -# CONFIG_MFD_DA9063 is not set -# CONFIG_MFD_DA9150 is not set -# CONFIG_MFD_DLN2 is not set -# CONFIG_MFD_MC13XXX_SPI is not set -# CONFIG_MFD_MC13XXX_I2C is not set -# CONFIG_MFD_HI6421_PMIC is not set -# CONFIG_HTC_PASIC3 is not set -# CONFIG_HTC_I2CPLD is not set -# CONFIG_LPC_ICH is not set -# CONFIG_LPC_SCH is not set -# CONFIG_INTEL_SOC_PMIC is not set -# CONFIG_MFD_JANZ_CMODIO is not set -# CONFIG_MFD_KEMPLD is not set -# CONFIG_MFD_88PM800 is not set -# CONFIG_MFD_88PM805 is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_MFD_MAX14577 is not set -# CONFIG_MFD_MAX77686 is not set -# CONFIG_MFD_MAX77693 is not set -# CONFIG_MFD_MAX77843 is not set -# CONFIG_MFD_MAX8907 is not set -# CONFIG_MFD_MAX8925 is not set -# CONFIG_MFD_MAX8997 is not set -# CONFIG_MFD_MAX8998 is not set -# CONFIG_MFD_MT6397 is not set -# CONFIG_MFD_MENF21BMC is not set -# CONFIG_EZX_PCAP is not set -# CONFIG_MFD_VIPERBOARD is not set -# CONFIG_MFD_RETU is not set -# CONFIG_MFD_PCF50633 is not set -# CONFIG_MFD_QCOM_RPM is not set -CONFIG_MFD_SPMI_PMIC=y -CONFIG_MFD_I2C_PMIC=y -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_RTSX_PCI is not set -# CONFIG_MFD_RT5033 is not set -# 
CONFIG_MFD_RTSX_USB is not set -# CONFIG_MFD_RC5T583 is not set -# CONFIG_MFD_RK808 is not set -# CONFIG_MFD_RN5T618 is not set -# CONFIG_MFD_SEC_CORE is not set -# CONFIG_MFD_SI476X_CORE is not set -# CONFIG_MFD_SM501 is not set -# CONFIG_MFD_SKY81452 is not set -# CONFIG_MFD_SMSC is not set -# CONFIG_ABX500_CORE is not set -# CONFIG_MFD_STMPE is not set -CONFIG_MFD_SYSCON=y -# CONFIG_MFD_TI_AM335X_TSCADC is not set -# CONFIG_MFD_LP3943 is not set -# CONFIG_MFD_LP8788 is not set -# CONFIG_MFD_PALMAS is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TPS65090 is not set -# CONFIG_MFD_TPS65217 is not set -# CONFIG_MFD_TPS65218 is not set -# CONFIG_MFD_TPS6586X is not set -# CONFIG_MFD_TPS65910 is not set -# CONFIG_MFD_TPS65912 is not set -# CONFIG_MFD_TPS65912_I2C is not set -# CONFIG_MFD_TPS65912_SPI is not set -# CONFIG_MFD_TPS80031 is not set -# CONFIG_TWL4030_CORE is not set -# CONFIG_TWL6040_CORE is not set -# CONFIG_MFD_WL1273_CORE is not set -# CONFIG_MFD_LM3533 is not set -# CONFIG_MFD_TC3589X is not set -# CONFIG_MFD_TMIO is not set -# CONFIG_MFD_VX855 is not set -# CONFIG_MFD_ARIZONA_I2C is not set -# CONFIG_MFD_ARIZONA_SPI is not set -# CONFIG_MFD_WM8400 is not set -# CONFIG_MFD_WM831X_I2C is not set -# CONFIG_MFD_WM831X_SPI is not set -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8994 is not set -CONFIG_MSM_CDC_PINCTRL=y -CONFIG_MSM_CDC_SUPPLY=y -CONFIG_WCD9XXX_CODEC_UTIL=y -# CONFIG_WCD9330_CODEC is not set -CONFIG_WCD9335_CODEC=y -CONFIG_WCD934X_CODEC=y -CONFIG_REGULATOR=y -# CONFIG_REGULATOR_DEBUG is not set -CONFIG_REGULATOR_FIXED_VOLTAGE=y -# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set -# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set -# CONFIG_REGULATOR_ACT8865 is not set -# CONFIG_REGULATOR_AD5398 is not set -# CONFIG_REGULATOR_ANATOP is not set -# CONFIG_REGULATOR_DA9210 is not set -# CONFIG_REGULATOR_DA9211 is not set -# CONFIG_REGULATOR_FAN53555 is not set -# 
CONFIG_REGULATOR_MSM_GFX_LDO is not set -# CONFIG_REGULATOR_GPIO is not set -# CONFIG_REGULATOR_ISL9305 is not set -# CONFIG_REGULATOR_ISL6271A is not set -# CONFIG_REGULATOR_LP3971 is not set -# CONFIG_REGULATOR_LP3972 is not set -# CONFIG_REGULATOR_LP872X is not set -# CONFIG_REGULATOR_LP8755 is not set -# CONFIG_REGULATOR_LTC3589 is not set -# CONFIG_REGULATOR_MAX1586 is not set -# CONFIG_REGULATOR_MAX20010 is not set -# CONFIG_REGULATOR_MAX8649 is not set -# CONFIG_REGULATOR_MAX8660 is not set -# CONFIG_REGULATOR_MAX8952 is not set -# CONFIG_REGULATOR_MAX8973 is not set -# CONFIG_REGULATOR_MT6311 is not set -# CONFIG_REGULATOR_ONSEMI_NCP6335D is not set -# CONFIG_REGULATOR_PFUZE100 is not set -# CONFIG_REGULATOR_PWM is not set -# CONFIG_REGULATOR_QCOM_SPMI is not set -# CONFIG_REGULATOR_TPS51632 is not set -# CONFIG_REGULATOR_TPS62360 is not set -# CONFIG_REGULATOR_TPS65023 is not set -# CONFIG_REGULATOR_TPS6507X is not set -# CONFIG_REGULATOR_TPS6524X is not set -CONFIG_REGULATOR_RPM_SMD=y -CONFIG_REGULATOR_QPNP=y -CONFIG_REGULATOR_QPNP_LABIBB=y -CONFIG_REGULATOR_QPNP_LCDB=y -# CONFIG_REGULATOR_QPNP_OLEDB is not set -CONFIG_REGULATOR_SPM=y -# CONFIG_REGULATOR_CPR is not set -# CONFIG_REGULATOR_CPR2_GFX is not set -CONFIG_REGULATOR_CPR3=y -CONFIG_REGULATOR_CPR3_HMSS=y -CONFIG_REGULATOR_CPR3_MMSS=y -# CONFIG_REGULATOR_CPR4_APSS is not set -CONFIG_REGULATOR_CPRH_KBSS=y -# CONFIG_REGULATOR_CPR4_MMSS_LDO is not set -# CONFIG_REGULATOR_KRYO is not set -CONFIG_REGULATOR_MEM_ACC=y -CONFIG_REGULATOR_PROXY_CONSUMER=y -CONFIG_REGULATOR_STUB=y -CONFIG_SOMC_LCD_OCP_ENABLED=y -CONFIG_MEDIA_SUPPORT=y - -# -# Multimedia core support -# -CONFIG_MEDIA_CAMERA_SUPPORT=y -# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set -CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y -# CONFIG_MEDIA_RADIO_SUPPORT is not set -# CONFIG_MEDIA_SDR_SUPPORT is not set -# CONFIG_MEDIA_RC_SUPPORT is not set -# CONFIG_MEDIA_CEC_SUPPORT is not set -CONFIG_MEDIA_CONTROLLER=y -CONFIG_VIDEO_DEV=y -CONFIG_VIDEO_V4L2_SUBDEV_API=y 
-CONFIG_VIDEO_V4L2=y -CONFIG_VIDEO_ADV_DEBUG=y -CONFIG_VIDEO_FIXED_MINOR_RANGES=y -CONFIG_V4L2_MEM2MEM_DEV=y -CONFIG_VIDEOBUF2_CORE=y -# CONFIG_V4L2_LOOPBACK is not set -CONFIG_DVB_CORE=y -CONFIG_DVB_NET=y -# CONFIG_TTPCI_EEPROM is not set -CONFIG_DVB_MAX_ADAPTERS=8 -# CONFIG_DVB_DYNAMIC_MINORS is not set - -# -# Media drivers -# -CONFIG_MEDIA_USB_SUPPORT=y - -# -# Webcam devices -# -# CONFIG_USB_VIDEO_CLASS is not set -# CONFIG_USB_GSPCA is not set -# CONFIG_USB_PWC is not set -# CONFIG_VIDEO_CPIA2 is not set -# CONFIG_USB_ZR364XX is not set -# CONFIG_USB_STKWEBCAM is not set -# CONFIG_USB_S2255 is not set -# CONFIG_VIDEO_USBTV is not set - -# -# Analog/digital TV USB devices -# -# CONFIG_VIDEO_AU0828 is not set - -# -# Digital TV USB devices -# -# CONFIG_DVB_USB_V2 is not set -# CONFIG_DVB_TTUSB_BUDGET is not set -# CONFIG_DVB_TTUSB_DEC is not set -# CONFIG_SMS_USB_DRV is not set -# CONFIG_DVB_B2C2_FLEXCOP_USB is not set -# CONFIG_DVB_AS102 is not set - -# -# Webcam, TV (analog/digital) USB devices -# -# CONFIG_VIDEO_EM28XX is not set -# CONFIG_MEDIA_PCI_SUPPORT is not set -CONFIG_V4L_PLATFORM_DRIVERS=y -# CONFIG_VIDEO_CAFE_CCIC is not set -# CONFIG_SOC_CAMERA is not set -# CONFIG_VIDEO_XILINX is not set -# CONFIG_V4L_MEM2MEM_DRIVERS is not set -# CONFIG_V4L_TEST_DRIVERS is not set -# CONFIG_DVB_PLATFORM_DRIVERS is not set - -# -# QTI MSM Camera And Video & AIS -# -CONFIG_MSM_CAMERA=y -# CONFIG_MSM_CAMERA_DEBUG is not set -CONFIG_MSMB_CAMERA=y -# CONFIG_MSMB_CAMERA_DEBUG is not set -CONFIG_MSM_CAMERA_SENSOR=y -CONFIG_MSM_CPP=y -CONFIG_MSM_CCI=y -CONFIG_MSM_CSI20_HEADER=y -CONFIG_MSM_CSI22_HEADER=y -CONFIG_MSM_CSI30_HEADER=y -CONFIG_MSM_CSI31_HEADER=y -CONFIG_MSM_CSIPHY=y -CONFIG_MSM_CSID=y -CONFIG_MSM_EEPROM=y -CONFIG_MSM_ISPIF=y -# CONFIG_MSM_ISPIF_V1 is not set -# CONFIG_MSM_ISPIF_V2 is not set -CONFIG_IMX134=y -CONFIG_IMX132=y -CONFIG_OV9724=y -CONFIG_OV5648=y -CONFIG_GC0339=y -CONFIG_OV8825=y -CONFIG_OV8865=y -CONFIG_s5k4e1=y -CONFIG_OV12830=y 
-CONFIG_SONY_CAM_V4L2=y -CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y -CONFIG_MSMB_JPEG=y -CONFIG_MSM_FD=y -CONFIG_MSM_JPEGDMA=y -CONFIG_MSM_SEC_CCI_TA_NAME="seccamdemo64" -# CONFIG_MSM_SEC_CCI_DEBUG is not set -CONFIG_MSM_VIDC_V4L2=y -CONFIG_MSM_VIDC_VMEM=y -CONFIG_MSM_VIDC_GOVERNORS=y -CONFIG_MSM_SDE_ROTATOR=y -# CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG is not set -# CONFIG_MSM_AIS is not set -# CONFIG_DVB_MPQ is not set -# CONFIG_TSPP is not set - -# -# Supported MMC/SDIO adapters -# -# CONFIG_SMS_SDIO_DRV is not set -# CONFIG_CYPRESS_FIRMWARE is not set - -# -# Media ancillary drivers (tuners, sensors, i2c, frontends) -# -CONFIG_MEDIA_SUBDRV_AUTOSELECT=y -CONFIG_MEDIA_ATTACH=y - -# -# Audio decoders, processors and mixers -# - -# -# RDS decoders -# - -# -# Video decoders -# - -# -# Video and audio decoders -# - -# -# Video encoders -# - -# -# Camera sensor devices -# - -# -# Flash devices -# - -# -# Video improvement chips -# - -# -# Audio/Video compression chips -# - -# -# Miscellaneous helper chips -# - -# -# Sensors used on soc_camera driver -# -CONFIG_MEDIA_TUNER=y -CONFIG_MEDIA_TUNER_SIMPLE=y -CONFIG_MEDIA_TUNER_TDA8290=y -CONFIG_MEDIA_TUNER_TDA827X=y -CONFIG_MEDIA_TUNER_TDA18271=y -CONFIG_MEDIA_TUNER_TDA9887=y -CONFIG_MEDIA_TUNER_MT20XX=y -CONFIG_MEDIA_TUNER_XC2028=y -CONFIG_MEDIA_TUNER_XC5000=y -CONFIG_MEDIA_TUNER_XC4000=y -CONFIG_MEDIA_TUNER_MC44S803=y - -# -# Multistandard (satellite) frontends -# - -# -# Multistandard (cable + terrestrial) frontends -# - -# -# DVB-S (satellite) frontends -# - -# -# DVB-T (terrestrial) frontends -# -# CONFIG_DVB_AS102_FE is not set - -# -# DVB-C (cable) frontends -# - -# -# ATSC (North American/Korean Terrestrial/Cable DTV) frontends -# - -# -# ISDB-T (terrestrial) frontends -# - -# -# ISDB-S (satellite) & ISDB-T (terrestrial) frontends -# - -# -# Digital terrestrial only tuners/PLL -# - -# -# SEC control devices for DVB-S -# - -# -# Tools to develop new frontends -# -# CONFIG_DVB_DUMMY_FE is not set - -# -# Graphics support -# 
-CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=16 -CONFIG_QCOM_KGSL=y -# CONFIG_QCOM_KGSL_CFF_DUMP is not set -CONFIG_QCOM_ADRENO_DEFAULT_GOVERNOR="msm-adreno-tz" -CONFIG_QCOM_KGSL_IOMMU=y -# CONFIG_DRM is not set -# CONFIG_MSM_BA_V4L2 is not set - -# -# Frame buffer Devices -# -CONFIG_FB=y -# CONFIG_FIRMWARE_EDID is not set -CONFIG_FB_CMDLINE=y -# CONFIG_FB_DDC is not set -# CONFIG_FB_BOOT_VESA_SUPPORT is not set -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set -# CONFIG_FB_SYS_FILLRECT is not set -# CONFIG_FB_SYS_COPYAREA is not set -# CONFIG_FB_SYS_IMAGEBLIT is not set -# CONFIG_FB_FOREIGN_ENDIAN is not set -# CONFIG_FB_SYS_FOPS is not set -# CONFIG_FB_SVGALIB is not set -# CONFIG_FB_MACMODES is not set -# CONFIG_FB_BACKLIGHT is not set -CONFIG_FB_MODE_HELPERS=y -# CONFIG_FB_TILEBLITTING is not set - -# -# Frame buffer hardware drivers -# -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -CONFIG_FB_ARMCLCD=y -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -# CONFIG_FB_OPENCORES is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_I740 is not set -# CONFIG_FB_MATROX is not set -# CONFIG_FB_RADEON is not set -# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_VT8623 is not set -# CONFIG_FB_TRIDENT is not set -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -# CONFIG_FB_SMSCUFX is not set -# CONFIG_FB_UDL is not set -# CONFIG_FB_IBM_GXT4500 is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set -CONFIG_FB_MSM=y -# CONFIG_FB_BROADSHEET is not set -# 
CONFIG_FB_AUO_K190X is not set -# CONFIG_FB_SIMPLE is not set -# CONFIG_MSM_DBA is not set -CONFIG_FB_MSM_MDSS_COMMON=y -# CONFIG_FB_MSM_MDP is not set -CONFIG_FB_MSM_MDSS=y -# CONFIG_FB_MSM_MDP_NONE is not set -# CONFIG_FB_MSM_QPIC_ILI_QVGA_PANEL is not set -# CONFIG_FB_MSM_QPIC_PANEL_DETECT is not set -CONFIG_FB_MSM_MDSS_WRITEBACK=y -CONFIG_FB_MSM_MDSS_SPECIFIC_PANEL=y -CONFIG_FB_MSM_MDSS_HDMI_PANEL=y -# CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334 is not set -# CONFIG_FB_MSM_MDSS_MHL3 is not set -# CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS is not set -CONFIG_FB_MSM_MDSS_DP_PANEL=y -# CONFIG_FB_MSM_MDSS_MDP3 is not set -CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y -# CONFIG_FB_SSD1307 is not set -# CONFIG_FB_SM712 is not set -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set -# CONFIG_ADF is not set -# CONFIG_VGASTATE is not set -CONFIG_VIDEOMODE_HELPERS=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_LOGO_LINUX_CLUT224=y -CONFIG_SOUND=y -# CONFIG_SOUND_OSS_CORE is not set -CONFIG_SND=y -CONFIG_SND_TIMER=y -CONFIG_SND_PCM=y -CONFIG_SND_HWDEP=y -CONFIG_SND_RAWMIDI=y -CONFIG_SND_COMPRESS_OFFLOAD=y -CONFIG_SND_JACK=y -# CONFIG_SND_SEQUENCER is not set -# CONFIG_SND_MIXER_OSS is not set -# CONFIG_SND_PCM_OSS is not set -CONFIG_SND_PCM_TIMER=y -# CONFIG_SND_HRTIMER is not set -CONFIG_SND_DYNAMIC_MINORS=y -CONFIG_SND_MAX_CARDS=32 -CONFIG_SND_SUPPORT_OLD_API=y -CONFIG_SND_PROC_FS=y -CONFIG_SND_VERBOSE_PROCFS=y -# CONFIG_SND_VERBOSE_PRINTK is not set -# CONFIG_SND_DEBUG is not set -# CONFIG_SND_RAWMIDI_SEQ is not set -# CONFIG_SND_OPL3_LIB_SEQ is not set -# CONFIG_SND_OPL4_LIB_SEQ is not set -# CONFIG_SND_SBAWE_SEQ is not set -# CONFIG_SND_EMU10K1_SEQ is not set -CONFIG_SND_DRIVERS=y -# CONFIG_SND_DUMMY is not set -# CONFIG_SND_ALOOP is not set -# CONFIG_SND_MTPAV is not set -# CONFIG_SND_SERIAL_U16550 is not set -# CONFIG_SND_MPU401 is not set -CONFIG_SND_PCI=y -# CONFIG_SND_AD1889 is not set -# CONFIG_SND_ALS300 is not set -# CONFIG_SND_ALI5451 is not 
set -# CONFIG_SND_ATIIXP is not set -# CONFIG_SND_ATIIXP_MODEM is not set -# CONFIG_SND_AU8810 is not set -# CONFIG_SND_AU8820 is not set -# CONFIG_SND_AU8830 is not set -# CONFIG_SND_AW2 is not set -# CONFIG_SND_AZT3328 is not set -# CONFIG_SND_BT87X is not set -# CONFIG_SND_CA0106 is not set -# CONFIG_SND_CMIPCI is not set -# CONFIG_SND_OXYGEN is not set -# CONFIG_SND_CS4281 is not set -# CONFIG_SND_CS46XX is not set -# CONFIG_SND_CTXFI is not set -# CONFIG_SND_DARLA20 is not set -# CONFIG_SND_GINA20 is not set -# CONFIG_SND_LAYLA20 is not set -# CONFIG_SND_DARLA24 is not set -# CONFIG_SND_GINA24 is not set -# CONFIG_SND_LAYLA24 is not set -# CONFIG_SND_MONA is not set -# CONFIG_SND_MIA is not set -# CONFIG_SND_ECHO3G is not set -# CONFIG_SND_INDIGO is not set -# CONFIG_SND_INDIGOIO is not set -# CONFIG_SND_INDIGODJ is not set -# CONFIG_SND_INDIGOIOX is not set -# CONFIG_SND_INDIGODJX is not set -# CONFIG_SND_EMU10K1 is not set -# CONFIG_SND_EMU10K1X is not set -# CONFIG_SND_ENS1370 is not set -# CONFIG_SND_ENS1371 is not set -# CONFIG_SND_ES1938 is not set -# CONFIG_SND_ES1968 is not set -# CONFIG_SND_FM801 is not set -# CONFIG_SND_HDSP is not set -# CONFIG_SND_HDSPM is not set -# CONFIG_SND_ICE1712 is not set -# CONFIG_SND_ICE1724 is not set -# CONFIG_SND_INTEL8X0 is not set -# CONFIG_SND_INTEL8X0M is not set -# CONFIG_SND_KORG1212 is not set -# CONFIG_SND_LOLA is not set -# CONFIG_SND_LX6464ES is not set -# CONFIG_SND_MAESTRO3 is not set -# CONFIG_SND_MIXART is not set -# CONFIG_SND_NM256 is not set -# CONFIG_SND_PCXHR is not set -# CONFIG_SND_RIPTIDE is not set -# CONFIG_SND_RME32 is not set -# CONFIG_SND_RME96 is not set -# CONFIG_SND_RME9652 is not set -# CONFIG_SND_SE6X is not set -# CONFIG_SND_SONICVIBES is not set -# CONFIG_SND_TRIDENT is not set -# CONFIG_SND_VIA82XX is not set -# CONFIG_SND_VIA82XX_MODEM is not set -# CONFIG_SND_VIRTUOSO is not set -# CONFIG_SND_VX222 is not set -# CONFIG_SND_YMFPCI is not set - -# -# HD-Audio -# -# 
CONFIG_SND_HDA_INTEL is not set -CONFIG_SND_HDA_PREALLOC_SIZE=64 -CONFIG_SND_SPI=y -CONFIG_SND_USB=y -CONFIG_SND_USB_AUDIO=y -# CONFIG_SND_USB_UA101 is not set -# CONFIG_SND_USB_CAIAQ is not set -# CONFIG_SND_USB_6FIRE is not set -# CONFIG_SND_USB_HIFACE is not set -# CONFIG_SND_BCD2000 is not set -# CONFIG_SND_USB_POD is not set -# CONFIG_SND_USB_PODHD is not set -# CONFIG_SND_USB_TONEPORT is not set -# CONFIG_SND_USB_VARIAX is not set -CONFIG_SND_USB_AUDIO_QMI=y -CONFIG_SND_SOC=y -CONFIG_SND_SOC_COMPRESS=y -# CONFIG_SND_ATMEL_SOC is not set -# CONFIG_SND_DESIGNWARE_I2S is not set - -# -# SoC Audio for Freescale CPUs -# - -# -# Common SoC Audio options for Freescale CPUs: -# -# CONFIG_SND_SOC_FSL_ASRC is not set -# CONFIG_SND_SOC_FSL_SAI is not set -# CONFIG_SND_SOC_FSL_SSI is not set -# CONFIG_SND_SOC_FSL_SPDIF is not set -# CONFIG_SND_SOC_FSL_ESAI is not set -# CONFIG_SND_SOC_IMX_AUDMUX is not set - -# -# MSM SoC Audio support -# -CONFIG_SND_SOC_MSM_HOSTLESS_PCM=y -CONFIG_SND_SOC_MSM_QDSP6V2_INTF=y -CONFIG_SND_SOC_QDSP6V2=y -# CONFIG_SND_SOC_QDSP_DEBUG is not set -CONFIG_FORCE_24BIT_COPP=y -CONFIG_AHC=y -# CONFIG_DOLBY_DS2 is not set -CONFIG_DOLBY_LICENSE=y -CONFIG_DTS_EAGLE=y -CONFIG_DTS_SRS_TM=y -CONFIG_QTI_PP=y -# CONFIG_QTI_PP_AUDIOSPHERE is not set -CONFIG_SND_SOC_CPE=y -# CONFIG_SND_SOC_INT_CODEC is not set -# CONFIG_SND_SOC_EXT_CODEC is not set -# CONFIG_SND_SOC_MSM8996_VM is not set -CONFIG_SND_SOC_MSM8998=y -# CONFIG_SND_SOC_QCOM is not set - -# -# Allwinner SoC Audio support -# -# CONFIG_SND_SUN4I_CODEC is not set -# CONFIG_SND_SOC_XTFPGA_I2S is not set -CONFIG_SND_SOC_I2C_AND_SPI=y - -# -# CODEC drivers -# -# CONFIG_SND_SOC_AC97_CODEC is not set -# CONFIG_SND_SOC_ADAU1701 is not set -# CONFIG_SND_SOC_AK4104 is not set -# CONFIG_SND_SOC_AK4554 is not set -# CONFIG_SND_SOC_AK4613 is not set -# CONFIG_SND_SOC_AK4642 is not set -# CONFIG_SND_SOC_AK5386 is not set -# CONFIG_SND_SOC_ALC5623 is not set -# CONFIG_SND_SOC_CS35L32 is not set -# 
CONFIG_SND_SOC_CS42L51_I2C is not set -# CONFIG_SND_SOC_CS42L52 is not set -# CONFIG_SND_SOC_CS42L56 is not set -# CONFIG_SND_SOC_CS42L73 is not set -# CONFIG_SND_SOC_CS4265 is not set -# CONFIG_SND_SOC_CS4270 is not set -# CONFIG_SND_SOC_CS4271_I2C is not set -# CONFIG_SND_SOC_CS4271_SPI is not set -# CONFIG_SND_SOC_CS42XX8_I2C is not set -# CONFIG_SND_SOC_CS4349 is not set -# CONFIG_SND_SOC_HDMI_CODEC is not set -# CONFIG_SND_SOC_ES8328 is not set -# CONFIG_SND_SOC_GTM601 is not set -# CONFIG_SND_SOC_PCM1681 is not set -# CONFIG_SND_SOC_PCM1792A is not set -# CONFIG_SND_SOC_PCM512x_I2C is not set -# CONFIG_SND_SOC_PCM512x_SPI is not set -# CONFIG_SND_SOC_RT5631 is not set -# CONFIG_SND_SOC_RT5677_SPI is not set -# CONFIG_SND_SOC_SGTL5000 is not set -# CONFIG_SND_SOC_SIRF_AUDIO_CODEC is not set -# CONFIG_SND_SOC_SPDIF is not set -# CONFIG_SND_SOC_SSM2602_SPI is not set -# CONFIG_SND_SOC_SSM2602_I2C is not set -# CONFIG_SND_SOC_SSM4567 is not set -# CONFIG_SND_SOC_STA32X is not set -# CONFIG_SND_SOC_STA350 is not set -# CONFIG_SND_SOC_STI_SAS is not set -# CONFIG_SND_SOC_TAS2552 is not set -# CONFIG_SND_SOC_TAS5086 is not set -# CONFIG_SND_SOC_TAS571X is not set -# CONFIG_SND_SOC_TFA9879 is not set -# CONFIG_SND_SOC_TLV320AIC23_I2C is not set -# CONFIG_SND_SOC_TLV320AIC23_SPI is not set -# CONFIG_SND_SOC_TLV320AIC31XX is not set -# CONFIG_SND_SOC_TLV320AIC3X is not set -# CONFIG_SND_SOC_TS3A227E is not set -CONFIG_SND_SOC_WCD934X_DSD=y -CONFIG_SND_SOC_WCD9335=y -CONFIG_SND_SOC_WCD934X=y -CONFIG_SND_SOC_WCD934X_MBHC=y -CONFIG_SND_SOC_WSA881X=y -CONFIG_SND_SOC_WCD9XXX=y -CONFIG_SND_SOC_WCD9XXX_V2=y -CONFIG_SND_SOC_WCD_CPE=y -CONFIG_AUDIO_EXT_CLK=y -CONFIG_SND_SOC_WCD_MBHC=y -CONFIG_SND_SOC_WCD_DSP_MGR=y -CONFIG_SND_SOC_WCD_SPI=y -# CONFIG_SND_SOC_WM8510 is not set -# CONFIG_SND_SOC_WM8523 is not set -# CONFIG_SND_SOC_WM8580 is not set -# CONFIG_SND_SOC_WM8711 is not set -# CONFIG_SND_SOC_WM8728 is not set -# CONFIG_SND_SOC_WM8731 is not set -# CONFIG_SND_SOC_WM8737 
is not set -# CONFIG_SND_SOC_WM8741 is not set -# CONFIG_SND_SOC_WM8750 is not set -# CONFIG_SND_SOC_WM8753 is not set -# CONFIG_SND_SOC_WM8770 is not set -# CONFIG_SND_SOC_WM8776 is not set -# CONFIG_SND_SOC_WM8804_I2C is not set -# CONFIG_SND_SOC_WM8804_SPI is not set -# CONFIG_SND_SOC_WM8903 is not set -# CONFIG_SND_SOC_WM8962 is not set -# CONFIG_SND_SOC_WM8978 is not set -# CONFIG_SND_SOC_TPA6130A2 is not set -CONFIG_SND_SOC_MSM_STUB=y -CONFIG_SND_SOC_MSM_HDMI_CODEC_RX=y -# CONFIG_SND_SOC_SDM660_CDC is not set -# CONFIG_SND_SOC_MSM_SDW is not set -# CONFIG_SND_SIMPLE_CARD is not set -# CONFIG_SOUND_PRIME is not set - -# -# HID support -# -CONFIG_HID=y -# CONFIG_HID_BATTERY_STRENGTH is not set -# CONFIG_HIDRAW is not set -CONFIG_UHID=y -CONFIG_HID_GENERIC=y - -# -# Special HID drivers -# -# CONFIG_HID_A4TECH is not set -# CONFIG_HID_ACRUX is not set -CONFIG_HID_APPLE=y -# CONFIG_HID_APPLEIR is not set -# CONFIG_HID_ASUS_GAMEPAD is not set -# CONFIG_HID_AUREAL is not set -# CONFIG_HID_BELKIN is not set -# CONFIG_HID_BETOP_FF is not set -# CONFIG_HID_CHERRY is not set -# CONFIG_HID_CHICONY is not set -# CONFIG_HID_CORSAIR is not set -# CONFIG_HID_PRODIKEYS is not set -# CONFIG_HID_CP2112 is not set -# CONFIG_HID_CYPRESS is not set -# CONFIG_HID_DRAGONRISE is not set -# CONFIG_HID_EMS_FF is not set -CONFIG_HID_ELECOM=y -# CONFIG_HID_ELO is not set -# CONFIG_HID_EZKEY is not set -# CONFIG_HID_GEMBIRD is not set -# CONFIG_HID_GFRM is not set -# CONFIG_HID_HOLTEK is not set -# CONFIG_HID_GT683R is not set -# CONFIG_HID_KEYTOUCH is not set -# CONFIG_HID_KYE is not set -# CONFIG_HID_UCLOGIC is not set -# CONFIG_HID_WALTOP is not set -# CONFIG_HID_GYRATION is not set -# CONFIG_HID_ICADE is not set -# CONFIG_HID_TWINHAN is not set -# CONFIG_HID_KENSINGTON is not set -# CONFIG_HID_LCPOWER is not set -# CONFIG_HID_LENOVO is not set -CONFIG_HID_LOGITECH=y -# CONFIG_HID_LOGITECH_HIDPP is not set -# CONFIG_LOGITECH_FF is not set -# CONFIG_LOGIRUMBLEPAD2_FF is not set -# 
CONFIG_LOGIG940_FF is not set -# CONFIG_LOGIWHEELS_FF is not set -CONFIG_HID_MAGICMOUSE=y -CONFIG_HID_MICROSOFT=y -# CONFIG_HID_MONTEREY is not set -CONFIG_HID_MULTITOUCH=y -# CONFIG_HID_NINTENDO is not set -# CONFIG_HID_NTRIG is not set -# CONFIG_HID_ORTEK is not set -CONFIG_HID_PANTHERLORD=y -# CONFIG_PANTHERLORD_FF is not set -# CONFIG_HID_PENMOUNT is not set -# CONFIG_HID_PETALYNX is not set -# CONFIG_HID_PICOLCD is not set -CONFIG_HID_PLANTRONICS=y -# CONFIG_HID_PRIMAX is not set -# CONFIG_HID_ROCCAT is not set -# CONFIG_HID_SAITEK is not set -# CONFIG_HID_SAMSUNG is not set -CONFIG_HID_SONY=y -# CONFIG_SONY_FF is not set -# CONFIG_HID_SPEEDLINK is not set -# CONFIG_HID_STEAM is not set -# CONFIG_HID_STEELSERIES is not set -# CONFIG_HID_SUNPLUS is not set -# CONFIG_HID_RMI is not set -# CONFIG_HID_GREENASIA is not set -# CONFIG_HID_SMARTJOYPLUS is not set -# CONFIG_HID_TIVO is not set -# CONFIG_HID_TOPSEED is not set -# CONFIG_HID_THINGM is not set -# CONFIG_HID_THRUSTMASTER is not set -# CONFIG_HID_WACOM is not set -# CONFIG_HID_WIIMOTE is not set -# CONFIG_HID_XINMO is not set -# CONFIG_HID_ZEROPLUS is not set -# CONFIG_HID_ZYDACRON is not set -# CONFIG_HID_SENSOR_HUB is not set - -# -# USB HID support -# -CONFIG_USB_HID=y -# CONFIG_HID_PID is not set -# CONFIG_USB_HIDDEV is not set - -# -# I2C HID support -# -# CONFIG_I2C_HID is not set -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=y -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEFAULT_PERSIST=y -# CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_OTG is not set -# CONFIG_USB_OTG_WHITELIST is not set -# CONFIG_USB_OTG_BLACKLIST_HUB is not set -# CONFIG_USB_ULPI_BUS is not set -# CONFIG_USB_MON is not set -# CONFIG_USB_WUSB_CBAF is not set - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_XHCI_PCI=y -CONFIG_USB_XHCI_PLATFORM=y 
-CONFIG_USB_EHCI_HCD=y -# CONFIG_USB_EHCI_ROOT_HUB_TT is not set -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=y -# CONFIG_USB_EHCI_MSM is not set -CONFIG_USB_EHCI_HCD_PLATFORM=y -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_ISP1362_HCD is not set -# CONFIG_USB_FOTG210_HCD is not set -# CONFIG_USB_MAX3421_HCD is not set -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PCI=y -CONFIG_USB_OHCI_HCD_PLATFORM=y -# CONFIG_USB_UHCI_HCD is not set -# CONFIG_USB_SL811_HCD is not set -# CONFIG_USB_R8A66597_HCD is not set -# CONFIG_USB_HCD_TEST_MODE is not set -CONFIG_USB_HOST_EXTRA_NOTIFICATION=y - -# -# USB Device Class drivers -# -# CONFIG_USB_ACM is not set -# CONFIG_USB_PRINTER is not set -# CONFIG_USB_WDM is not set -# CONFIG_USB_TMC is not set - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=y -# CONFIG_USB_STORAGE_DEBUG is not set -# CONFIG_USB_STORAGE_REALTEK is not set -# CONFIG_USB_STORAGE_DATAFAB is not set -# CONFIG_USB_STORAGE_FREECOM is not set -# CONFIG_USB_STORAGE_ISD200 is not set -# CONFIG_USB_STORAGE_USBAT is not set -# CONFIG_USB_STORAGE_SDDR09 is not set -# CONFIG_USB_STORAGE_SDDR55 is not set -# CONFIG_USB_STORAGE_JUMPSHOT is not set -# CONFIG_USB_STORAGE_ALAUDA is not set -# CONFIG_USB_STORAGE_ONETOUCH is not set -# CONFIG_USB_STORAGE_KARMA is not set -# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set -# CONFIG_USB_STORAGE_ENE_UB6250 is not set -# CONFIG_USB_UAS is not set - -# -# USB Imaging devices -# -# CONFIG_USB_MDC800 is not set -# CONFIG_USB_MICROTEK is not set -# CONFIG_USBIP_CORE is not set -# CONFIG_USB_MUSB_HDRC is not set -CONFIG_USB_DWC3=y -# CONFIG_USB_DWC3_HOST is not set -# CONFIG_USB_DWC3_GADGET is not set -CONFIG_USB_DWC3_DUAL_ROLE=y - -# -# Platform Glue Driver Support -# -# CONFIG_USB_DWC3_PCI is not set -CONFIG_USB_DWC3_QCOM=y -CONFIG_USB_DWC3_MSM_ID_POLL=y -# CONFIG_USB_DWC2 is not set -# 
CONFIG_USB_CHIPIDEA is not set -CONFIG_USB_ISP1760=y -CONFIG_USB_ISP1760_HCD=y -CONFIG_USB_ISP1760_HOST_ROLE=y -# CONFIG_USB_ISP1760_GADGET_ROLE is not set -# CONFIG_USB_ISP1760_DUAL_ROLE is not set - -# -# USB Power Delivery -# -CONFIG_USB_PD=y -CONFIG_USB_PD_POLICY=y -CONFIG_QPNP_USB_PDPHY=y - -# -# USB port drivers -# -# CONFIG_USB_SERIAL is not set - -# -# USB Miscellaneous drivers -# -# CONFIG_USB_EMI62 is not set -# CONFIG_USB_EMI26 is not set -# CONFIG_USB_ADUTUX is not set -# CONFIG_USB_SEVSEG is not set -# CONFIG_USB_LEGOTOWER is not set -# CONFIG_USB_LCD is not set -# CONFIG_USB_LED is not set -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set -# CONFIG_USB_IDMOUSE is not set -# CONFIG_USB_FTDI_ELAN is not set -# CONFIG_USB_APPLEDISPLAY is not set -# CONFIG_USB_SISUSBVGA is not set -# CONFIG_USB_LD is not set -# CONFIG_USB_TRANCEVIBRATOR is not set -# CONFIG_USB_IOWARRIOR is not set -# CONFIG_USB_TEST is not set -CONFIG_USB_EHSET_TEST_FIXTURE=y -# CONFIG_USB_ISIGHTFW is not set -# CONFIG_USB_YUREX is not set -# CONFIG_USB_EZUSB_FX2 is not set -# CONFIG_USB_HSIC_USB3503 is not set -# CONFIG_USB_LINK_LAYER_TEST is not set -# CONFIG_USB_CHAOSKEY is not set -# CONFIG_USB_QTI_KS_BRIDGE is not set -# CONFIG_USB_QCOM_DIAG_BRIDGE is not set - -# -# USB Physical Layer drivers -# -CONFIG_USB_PHY=y -# CONFIG_USB_OTG_WAKELOCK is not set -# CONFIG_NOP_USB_XCEIV is not set -# CONFIG_USB_GPIO_VBUS is not set -# CONFIG_USB_ISP1301 is not set -# CONFIG_USB_MSM_OTG is not set -# CONFIG_USB_QCOM_8X16_PHY is not set -# CONFIG_USB_MSM_HSPHY is not set -# CONFIG_USB_MSM_SSPHY is not set -CONFIG_USB_MSM_SSPHY_QMP=y -CONFIG_MSM_QUSB_PHY=y -# CONFIG_USB_ULPI is not set -CONFIG_DUAL_ROLE_USB_INTF=y -CONFIG_USB_GADGET=y -# CONFIG_USB_GADGET_DEBUG is not set -# CONFIG_USB_GADGET_DEBUG_FILES is not set -# CONFIG_USB_GADGET_DEBUG_FS is not set -CONFIG_USB_GADGET_VBUS_DRAW=500 -CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 - -# -# USB Peripheral Controller -# -# 
CONFIG_USB_FOTG210_UDC is not set -# CONFIG_USB_GR_UDC is not set -# CONFIG_USB_R8A66597 is not set -# CONFIG_USB_PXA27X is not set -# CONFIG_USB_MV_UDC is not set -# CONFIG_USB_MV_U3D is not set -# CONFIG_USB_M66592 is not set -# CONFIG_USB_BDC_UDC is not set -# CONFIG_USB_AMD5536UDC is not set -# CONFIG_USB_NET2272 is not set -# CONFIG_USB_NET2280 is not set -# CONFIG_USB_GOKU is not set -# CONFIG_USB_EG20T is not set -# CONFIG_USB_GADGET_XILINX is not set -# CONFIG_USB_DUMMY_HCD is not set -CONFIG_USB_LIBCOMPOSITE=y -CONFIG_USB_F_MASS_STORAGE=y -CONFIG_USB_F_FS=y -CONFIG_USB_F_MIDI=y -CONFIG_USB_F_HID=y -CONFIG_USB_F_MTP=y -CONFIG_USB_F_PTP=y -CONFIG_USB_F_AUDIO_SRC=y -CONFIG_USB_F_ACC=y -CONFIG_USB_F_DIAG=y -CONFIG_USB_F_GSI=y -CONFIG_USB_F_CDEV=y -CONFIG_USB_F_QDSS=y -CONFIG_USB_F_CCID=y -CONFIG_USB_CONFIGFS=y -# CONFIG_USB_CONFIGFS_SERIAL is not set -# CONFIG_USB_CONFIGFS_ACM is not set -# CONFIG_USB_CONFIGFS_OBEX is not set -# CONFIG_USB_CONFIGFS_NCM is not set -# CONFIG_USB_CONFIGFS_ECM is not set -# CONFIG_USB_CONFIGFS_ECM_SUBSET is not set -# CONFIG_USB_CONFIGFS_QCRNDIS is not set -# CONFIG_USB_CONFIGFS_RNDIS is not set -# CONFIG_USB_CONFIGFS_RMNET_BAM is not set -# CONFIG_USB_CONFIGFS_EEM is not set -CONFIG_USB_CONFIGFS_MASS_STORAGE=y -# CONFIG_USB_CONFIGFS_F_LB_SS is not set -CONFIG_USB_CONFIGFS_F_FS=y -CONFIG_USB_CONFIGFS_F_MTP=y -CONFIG_USB_CONFIGFS_F_PTP=y -CONFIG_USB_CONFIGFS_F_ACC=y -CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y -CONFIG_USB_CONFIGFS_UEVENT=y -# CONFIG_USB_CONFIGFS_F_UAC1 is not set -# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set -# CONFIG_USB_CONFIGFS_F_UAC2 is not set -CONFIG_USB_CONFIGFS_F_MIDI=y -CONFIG_USB_CONFIGFS_F_HID=y -# CONFIG_USB_CONFIGFS_F_UVC is not set -# CONFIG_USB_CONFIGFS_F_PRINTER is not set -CONFIG_USB_CONFIGFS_F_DIAG=y -CONFIG_USB_CONFIGFS_F_GSI=y -CONFIG_USB_CONFIGFS_F_CDEV=y -CONFIG_USB_CONFIGFS_F_QDSS=y -CONFIG_USB_CONFIGFS_F_CCID=y -# CONFIG_USB_ANDROID_PRODUCTION is not set -# CONFIG_USB_ZERO is not set -# 
CONFIG_USB_AUDIO is not set -# CONFIG_USB_ETH is not set -# CONFIG_USB_G_NCM is not set -# CONFIG_USB_GADGETFS is not set -# CONFIG_USB_FUNCTIONFS is not set -# CONFIG_USB_MASS_STORAGE is not set -# CONFIG_USB_G_SERIAL is not set -# CONFIG_USB_MIDI_GADGET is not set -# CONFIG_USB_G_PRINTER is not set -# CONFIG_USB_CDC_COMPOSITE is not set -# CONFIG_USB_G_ACM_MS is not set -# CONFIG_USB_G_MULTI is not set -# CONFIG_USB_G_HID is not set -# CONFIG_USB_G_DBGP is not set -# CONFIG_USB_G_WEBCAM is not set -# CONFIG_USB_LED_TRIG is not set -# CONFIG_UWB is not set -CONFIG_MMC=y -# CONFIG_MMC_DEBUG is not set -CONFIG_MMC_PERF_PROFILING=y -# CONFIG_MMC_RING_BUFFER is not set -# CONFIG_MMC_EMBEDDED_SDIO is not set -# CONFIG_MMC_PARANOID_SD_INIT is not set -CONFIG_MMC_CLKGATE=y -# CONFIG_MMC_CMD_DEBUG is not set - -# -# MMC/SD/SDIO Card Drivers -# -CONFIG_MMC_BLOCK=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_BLOCK_BOUNCE=y -CONFIG_MMC_BLOCK_DEFERRED_RESUME=y -# CONFIG_SDIO_UART is not set -CONFIG_MMC_TEST=y -# CONFIG_MMC_SIMULATE_MAX_SPEED is not set - -# -# MMC/SD/SDIO Host Controller Drivers -# -# CONFIG_MMC_ARMMMCI is not set -CONFIG_MMC_SDHCI=y -# CONFIG_MMC_SDHCI_PCI is not set -# CONFIG_MMC_SDHCI_ACPI is not set -CONFIG_MMC_SDHCI_PLTFM=y -# CONFIG_MMC_SDHCI_OF_ARASAN is not set -# CONFIG_MMC_SDHCI_OF_AT91 is not set -# CONFIG_MMC_SDHCI_F_SDH30 is not set -CONFIG_MMC_SDHCI_MSM=y -# CONFIG_MMC_SDHCI_MSM_ICE is not set -# CONFIG_MMC_ENABLE_CLK_SCALE is not set -# CONFIG_MMC_TIFM_SD is not set -# CONFIG_MMC_SPI is not set -# CONFIG_MMC_CB710 is not set -# CONFIG_MMC_VIA_SDMMC is not set -# CONFIG_MMC_DW is not set -# CONFIG_MMC_VUB300 is not set -# CONFIG_MMC_USHC is not set -# CONFIG_MMC_USDHI6ROL0 is not set -# CONFIG_MMC_CQ_HCI is not set -# CONFIG_MMC_TOSHIBA_PCI is not set -# CONFIG_MMC_MTK is not set -# CONFIG_MEMSTICK is not set -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -# CONFIG_LEDS_CLASS_FLASH is not set - -# -# LED drivers -# -# CONFIG_LEDS_BCM6328 is not set -# 
CONFIG_LEDS_BCM6358 is not set -# CONFIG_LEDS_LM3530 is not set -# CONFIG_LEDS_LM3642 is not set -# CONFIG_LEDS_PCA9532 is not set -# CONFIG_LEDS_GPIO is not set -# CONFIG_LEDS_LP3944 is not set -# CONFIG_LEDS_LP5521 is not set -# CONFIG_LEDS_LP5523 is not set -# CONFIG_LEDS_LP5562 is not set -# CONFIG_LEDS_LP8501 is not set -# CONFIG_LEDS_LP8860 is not set -# CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_PCA963X is not set -# CONFIG_LEDS_DAC124S085 is not set -# CONFIG_LEDS_PWM is not set -# CONFIG_LEDS_REGULATOR is not set -# CONFIG_LEDS_BD2802 is not set -# CONFIG_LEDS_LT3593 is not set -# CONFIG_LEDS_TCA6507 is not set -# CONFIG_LEDS_TLC591XX is not set -# CONFIG_LEDS_LM355x is not set - -# -# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) -# -# CONFIG_LEDS_BLINKM is not set -CONFIG_LEDS_QPNP=y -# CONFIG_LEDS_QPNP_FLASH is not set -CONFIG_LEDS_QPNP_FLASH_V2=y -CONFIG_LEDS_QPNP_WLED=y -# CONFIG_LEDS_QPNP_RGB_SCALE is not set -CONFIG_LEDS_SYSCON=y -# CONFIG_LEDS_QPNP_HAPTICS is not set - -# -# LED Triggers -# -CONFIG_LEDS_TRIGGERS=y -# CONFIG_LEDS_TRIGGER_TIMER is not set -# CONFIG_LEDS_TRIGGER_ONESHOT is not set -# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set -# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set -# CONFIG_LEDS_TRIGGER_CPU is not set -# CONFIG_LEDS_TRIGGER_GPIO is not set -# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set - -# -# iptables trigger is under Netfilter config (LED target) -# -# CONFIG_LEDS_TRIGGER_TRANSIENT is not set -# CONFIG_LEDS_TRIGGER_CAMERA is not set -CONFIG_SWITCH=y -# CONFIG_SWITCH_GPIO is not set -# CONFIG_ACCESSIBILITY is not set -# CONFIG_INFINIBAND is not set -CONFIG_EDAC_SUPPORT=y -CONFIG_EDAC=y -CONFIG_EDAC_LEGACY_SYSFS=y -# CONFIG_EDAC_DEBUG is not set -CONFIG_EDAC_MM_EDAC=y -# CONFIG_EDAC_XGENE is not set -CONFIG_EDAC_CORTEX_ARM64=y -# CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_CE is not set -# CONFIG_EDAC_CORTEX_ARM64_DBE_IRQ_ONLY is not set -CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_UE=y -CONFIG_RTC_LIB=y 
-CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -CONFIG_RTC_SYSTOHC=y -CONFIG_RTC_SYSTOHC_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -# CONFIG_RTC_DRV_ABB5ZES3 is not set -# CONFIG_RTC_DRV_ABX80X is not set -# CONFIG_RTC_DRV_DS1307 is not set -# CONFIG_RTC_DRV_DS1374 is not set -# CONFIG_RTC_DRV_DS1672 is not set -# CONFIG_RTC_DRV_DS3232 is not set -# CONFIG_RTC_DRV_HYM8563 is not set -# CONFIG_RTC_DRV_MAX6900 is not set -# CONFIG_RTC_DRV_RS5C372 is not set -# CONFIG_RTC_DRV_ISL1208 is not set -# CONFIG_RTC_DRV_ISL12022 is not set -# CONFIG_RTC_DRV_ISL12057 is not set -# CONFIG_RTC_DRV_X1205 is not set -# CONFIG_RTC_DRV_PCF2127 is not set -# CONFIG_RTC_DRV_PCF8523 is not set -# CONFIG_RTC_DRV_PCF8563 is not set -# CONFIG_RTC_DRV_PCF85063 is not set -# CONFIG_RTC_DRV_PCF8583 is not set -# CONFIG_RTC_DRV_M41T80 is not set -# CONFIG_RTC_DRV_BQ32K is not set -# CONFIG_RTC_DRV_S35390A is not set -# CONFIG_RTC_DRV_FM3130 is not set -# CONFIG_RTC_DRV_RX8581 is not set -# CONFIG_RTC_DRV_RX8025 is not set -# CONFIG_RTC_DRV_EM3027 is not set -# CONFIG_RTC_DRV_RV3029C2 is not set -# CONFIG_RTC_DRV_RV8803 is not set - -# -# SPI RTC drivers -# -# CONFIG_RTC_DRV_M41T93 is not set -# CONFIG_RTC_DRV_M41T94 is not set -# CONFIG_RTC_DRV_DS1305 is not set -# CONFIG_RTC_DRV_DS1343 is not set -# CONFIG_RTC_DRV_DS1347 is not set -# CONFIG_RTC_DRV_DS1390 is not set -# CONFIG_RTC_DRV_MAX6902 is not set -# CONFIG_RTC_DRV_R9701 is not set -# CONFIG_RTC_DRV_RS5C348 is not set -# CONFIG_RTC_DRV_DS3234 is not set -# CONFIG_RTC_DRV_PCF2123 is not set -# CONFIG_RTC_DRV_RX4581 is not set -# CONFIG_RTC_DRV_MCP795 is not set - -# -# Platform RTC drivers -# -# CONFIG_RTC_DRV_DS1286 is not set -# CONFIG_RTC_DRV_DS1511 is not set -# CONFIG_RTC_DRV_DS1553 is not set -# 
CONFIG_RTC_DRV_DS1685_FAMILY is not set -# CONFIG_RTC_DRV_DS1742 is not set -# CONFIG_RTC_DRV_DS2404 is not set -# CONFIG_RTC_DRV_STK17TA8 is not set -# CONFIG_RTC_DRV_M48T86 is not set -# CONFIG_RTC_DRV_M48T35 is not set -# CONFIG_RTC_DRV_M48T59 is not set -# CONFIG_RTC_DRV_MSM6242 is not set -# CONFIG_RTC_DRV_BQ4802 is not set -# CONFIG_RTC_DRV_RP5C01 is not set -# CONFIG_RTC_DRV_V3020 is not set -# CONFIG_RTC_DRV_ZYNQMP is not set - -# -# on-CPU RTC drivers -# -# CONFIG_RTC_DRV_PL030 is not set -# CONFIG_RTC_DRV_PL031 is not set -# CONFIG_RTC_DRV_PM8XXX is not set -# CONFIG_RTC_DRV_SNVS is not set -CONFIG_RTC_DRV_QPNP=y - -# -# HID Sensor RTC drivers -# -# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set -CONFIG_ESOC=y -CONFIG_ESOC_DEV=y -CONFIG_ESOC_CLIENT=y -# CONFIG_ESOC_DEBUG is not set -CONFIG_ESOC_MDM_4x=y -CONFIG_ESOC_MDM_DRV=y -# CONFIG_ESOC_MDM_DBG_ENG is not set -CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set - -# -# DMA Devices -# -CONFIG_DMA_ENGINE=y -CONFIG_DMA_ACPI=y -CONFIG_DMA_OF=y -# CONFIG_AMBA_PL08X is not set -# CONFIG_FSL_EDMA is not set -# CONFIG_INTEL_IDMA64 is not set -# CONFIG_PL330_DMA is not set -# CONFIG_QCOM_BAM_DMA is not set -CONFIG_QCOM_SPS_DMA=y -# CONFIG_DW_DMAC is not set -# CONFIG_DW_DMAC_PCI is not set - -# -# DMA Clients -# -# CONFIG_ASYNC_TX_DMA is not set -# CONFIG_DMATEST is not set -# CONFIG_AUXDISPLAY is not set -CONFIG_UIO=y -# CONFIG_UIO_CIF is not set -# CONFIG_UIO_PDRV_GENIRQ is not set -# CONFIG_UIO_DMEM_GENIRQ is not set -# CONFIG_UIO_AEC is not set -# CONFIG_UIO_SERCOS3 is not set -# CONFIG_UIO_PCI_GENERIC is not set -# CONFIG_UIO_NETX is not set -# CONFIG_UIO_PRUSS is not set -# CONFIG_UIO_MF624 is not set -CONFIG_UIO_MSM_SHAREDMEM=y -# CONFIG_VFIO is not set -# CONFIG_VIRT_DRIVERS is not set - -# -# Virtio drivers -# -# CONFIG_VIRTIO_PCI is not set -# CONFIG_VIRTIO_MMIO is not set - -# -# Microsoft Hyper-V guest support -# -CONFIG_STAGING=y -# CONFIG_PRISM2_USB is not set -# CONFIG_COMEDI is not set -# 
CONFIG_RTL8192U is not set -# CONFIG_RTLLIB is not set -# CONFIG_R8712U is not set -# CONFIG_R8188EU is not set -# CONFIG_R8723AU is not set -# CONFIG_RTS5208 is not set - -# -# IIO staging drivers -# - -# -# Accelerometers -# -# CONFIG_ADIS16201 is not set -# CONFIG_ADIS16203 is not set -# CONFIG_ADIS16204 is not set -# CONFIG_ADIS16209 is not set -# CONFIG_ADIS16220 is not set -# CONFIG_ADIS16240 is not set -# CONFIG_LIS3L02DQ is not set - -# -# Analog to digital converters -# -# CONFIG_AD7606 is not set -# CONFIG_AD7780 is not set -# CONFIG_AD7816 is not set -# CONFIG_AD7192 is not set -# CONFIG_AD7280 is not set - -# -# Analog digital bi-direction converters -# -# CONFIG_ADT7316 is not set - -# -# Capacitance to digital converters -# -# CONFIG_AD7150 is not set -# CONFIG_AD7152 is not set -# CONFIG_AD7746 is not set - -# -# Direct Digital Synthesis -# -# CONFIG_AD9832 is not set -# CONFIG_AD9834 is not set - -# -# Digital gyroscope sensors -# -# CONFIG_ADIS16060 is not set - -# -# Network Analyzer, Impedance Converters -# -# CONFIG_AD5933 is not set - -# -# Light sensors -# -# CONFIG_SENSORS_ISL29018 is not set -# CONFIG_SENSORS_ISL29028 is not set -# CONFIG_TSL2583 is not set -# CONFIG_TSL2x7x is not set - -# -# Magnetometer sensors -# -# CONFIG_SENSORS_HMC5843_I2C is not set -# CONFIG_SENSORS_HMC5843_SPI is not set - -# -# Active energy metering IC -# -# CONFIG_ADE7753 is not set -# CONFIG_ADE7754 is not set -# CONFIG_ADE7758 is not set -# CONFIG_ADE7759 is not set -# CONFIG_ADE7854 is not set - -# -# Resolver to digital converters -# -# CONFIG_AD2S90 is not set -# CONFIG_AD2S1200 is not set -# CONFIG_AD2S1210 is not set - -# -# Triggers - standalone -# -# CONFIG_IIO_SIMPLE_DUMMY is not set -# CONFIG_FB_SM750 is not set -# CONFIG_FB_XGI is not set - -# -# Speakup console speech -# -# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set -# CONFIG_STAGING_MEDIA is not set - -# -# Android -# -CONFIG_ASHMEM=y -CONFIG_ANDROID_TIMED_OUTPUT=y 
-CONFIG_ANDROID_TIMED_GPIO=y -CONFIG_ANDROID_LOW_MEMORY_KILLER=y -CONFIG_ANDROID_LOW_MEMORY_KILLER_TNG=y -CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES=y -CONFIG_ANDROID_LOW_MEMORY_KILLER_STATS=y -CONFIG_SYNC=y -CONFIG_SW_SYNC=y -# CONFIG_SW_SYNC_USER is not set -CONFIG_ONESHOT_SYNC=y -# CONFIG_ONESHOT_SYNC_USER is not set -# CONFIG_ANDROID_VSOC is not set -CONFIG_ION=y -# CONFIG_ION_TEST is not set -# CONFIG_ION_DUMMY is not set -CONFIG_ION_MSM=y -CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS=y -# CONFIG_STAGING_BOARD is not set -# CONFIG_WIMAX_GDM72XX is not set -# CONFIG_LTE_GDM724X is not set -# CONFIG_LUSTRE_FS is not set -# CONFIG_DGNC is not set -# CONFIG_DGAP is not set -# CONFIG_GS_FPGABOOT is not set -# CONFIG_FB_TFT is not set -# CONFIG_FSL_MC_BUS is not set -# CONFIG_WILC1000_DRIVER is not set -# CONFIG_MOST is not set -CONFIG_SONY_FIPS_KSCL=y - -# -# Qualcomm Atheros CLD WLAN module -# -CONFIG_QCA_CLD_WLAN=y -# CONFIG_GOLDFISH is not set -# CONFIG_CHROME_PLATFORMS is not set - -# -# Qualcomm MSM specific device drivers -# -CONFIG_QPNP_REVID=y -CONFIG_QPNP_COINCELL=y -CONFIG_SPS=y -# CONFIG_SPS_SUPPORT_BAMDMA is not set -CONFIG_SPS_SUPPORT_NDP_BAM=y -# CONFIG_EP_PCIE is not set -CONFIG_IPA=y -CONFIG_RMNET_IPA=y -CONFIG_GSI=y -CONFIG_IPA3=y -CONFIG_RMNET_IPA3=y -# CONFIG_IPA_UT is not set -# CONFIG_GPIO_USB_DETECT is not set -CONFIG_MSM_MHI=y -CONFIG_MSM_MHI_UCI=y -# CONFIG_MSM_MHI_DEBUG is not set -# CONFIG_MSM_11AD is not set -# CONFIG_SEEMP_CORE is not set -CONFIG_USB_BAM=y -CONFIG_MSM_EXT_DISPLAY=y -# CONFIG_SDIO_QCN is not set -CONFIG_CLKDEV_LOOKUP=y -CONFIG_HAVE_CLK_PREPARE=y -CONFIG_COMMON_CLK_MSM=y -# CONFIG_MSM_CLK_CONTROLLER_V2 is not set -CONFIG_MSM_MDSS_PLL=y -CONFIG_HWSPINLOCK=y - -# -# Hardware Spinlock drivers -# -# CONFIG_HWSPINLOCK_QCOM is not set -CONFIG_REMOTE_SPINLOCK_MSM=y - -# -# Clock Source drivers -# -CONFIG_CLKSRC_OF=y -CONFIG_CLKSRC_ACPI=y -CONFIG_CLKSRC_PROBE=y -CONFIG_ARM_ARCH_TIMER=y -CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y 
-CONFIG_MSM_TIMER_LEAP=y -CONFIG_ARM_ARCH_TIMER_VCT_ACCESS=y -# CONFIG_ARM_TIMER_SP804 is not set -# CONFIG_ATMEL_PIT is not set -# CONFIG_SH_TIMER_CMT is not set -# CONFIG_SH_TIMER_MTU2 is not set -# CONFIG_SH_TIMER_TMU is not set -# CONFIG_EM_TIMER_STI is not set -# CONFIG_MAILBOX is not set -CONFIG_IOMMU_API=y -CONFIG_IOMMU_SUPPORT=y - -# -# Generic IOMMU Pagetable Support -# -CONFIG_IOMMU_IO_PGTABLE=y -CONFIG_IOMMU_IO_PGTABLE_LPAE=y -# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set -CONFIG_IOMMU_IO_PGTABLE_FAST=y -# CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST is not set -# CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB is not set -CONFIG_OF_IOMMU=y -CONFIG_ARM_SMMU=y -# CONFIG_ARM_SMMU_V3 is not set -# CONFIG_IOMMU_DEBUG is not set - -# -# Remoteproc drivers -# -# CONFIG_STE_MODEM_RPROC is not set - -# -# Rpmsg drivers -# - -# -# SOC (System On Chip) specific Drivers -# -# CONFIG_MSM_HAB is not set -# CONFIG_MSM_AGL is not set -# CONFIG_MSM_PASR is not set -# CONFIG_MSM_INRUSH_CURRENT_MITIGATION is not set -# CONFIG_MSM_PFE_WA is not set -# CONFIG_QCOM_COMMON_LOG is not set -CONFIG_MSM_SMEM=y -# CONFIG_QPNP_HAPTIC is not set -# CONFIG_QPNP_PBS is not set -CONFIG_MSM_SMD=y -# CONFIG_MSM_SMD_DEBUG is not set -CONFIG_MSM_GLINK=y -CONFIG_MSM_GLINK_LOOPBACK_SERVER=y -CONFIG_MSM_GLINK_SMD_XPRT=y -CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y -CONFIG_MSM_GLINK_SPI_XPRT=y -CONFIG_MSM_SPCOM=y -CONFIG_MSM_SPSS_UTILS=y -# CONFIG_MSM_SMEM_LOGGING is not set -CONFIG_MSM_SMP2P=y -# CONFIG_MSM_SMP2P_TEST is not set -CONFIG_MSM_QMI_INTERFACE=y -# CONFIG_MSM_L2_IA_DEBUG is not set -CONFIG_MSM_RPM_SMD=y -CONFIG_QCOM_BUS_SCALING=y -# CONFIG_QCOM_GSBI is not set -# CONFIG_QCOM_SMEM is not set -CONFIG_MSM_SERVICE_LOCATOR=y -# CONFIG_MSM_HVC is not set -CONFIG_QCOM_DCC=y -CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y -CONFIG_MSM_SYSMON_GLINK_COMM=y -# CONFIG_MSM_IPC_ROUTER_MHI_XPRT is not set -CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y -# CONFIG_MSM_SYSTEM_HEALTH_MONITOR is not set -CONFIG_MSM_GLINK_PKT=y -CONFIG_MSM_SPM=y 
-# CONFIG_MSM_L2_SPM is not set -CONFIG_QCOM_SCM=y -# CONFIG_QCOM_SCM_QCPE is not set -# CONFIG_QCOM_EARLY_DOMAIN is not set -# CONFIG_QCOM_SCM_XPU is not set -# CONFIG_QCOM_SCM_ERRATA is not set -CONFIG_QCOM_WATCHDOG_V2=y -CONFIG_MSM_FORCE_PANIC_ON_WDOG_BARK=y -CONFIG_QCOM_IRQ_HELPER=y -# CONFIG_QCOM_MEMORY_DUMP is not set -CONFIG_QCOM_MEMORY_DUMP_V2=y -# CONFIG_QCOM_MINIDUMP is not set -CONFIG_ICNSS=y -# CONFIG_ICNSS_DEBUG is not set -CONFIG_MSM_SECURE_BUFFER=y -# CONFIG_MSM_GLADIATOR_ERP is not set -# CONFIG_MSM_GLADIATOR_ERP_V2 is not set -# CONFIG_MSM_GLADIATOR_HANG_DETECT is not set -# CONFIG_MSM_CORE_HANG_DETECT is not set -CONFIG_MSM_RUN_QUEUE_STATS=y -# CONFIG_MSM_JTAGV8 is not set -CONFIG_MSM_BOOT_STATS=y -# CONFIG_MSM_BOOT_TIME_MARKER is not set -# CONFIG_QCOM_CPUSS_DUMP is not set -# CONFIG_MSM_QDSP6_APRV2 is not set -# CONFIG_MSM_QDSP6_APRV3 is not set -CONFIG_MSM_QDSP6_APRV2_GLINK=y -# CONFIG_MSM_QDSP6_APRV3_GLINK is not set -CONFIG_MSM_QDSP6_SSR=y -CONFIG_MSM_QDSP6_PDR=y -CONFIG_MSM_QDSP6_NOTIFIER=y -CONFIG_MSM_ADSP_LOADER=y -# CONFIG_MSM_CDSP_LOADER is not set -# CONFIG_MSM_LPASS_RESOURCE_MANAGER is not set -# CONFIG_MSM_PERFORMANCE is not set -CONFIG_MSM_SUBSYSTEM_RESTART=y -# CONFIG_MSM_SYSMON_COMM is not set -CONFIG_MSM_PIL=y -CONFIG_MSM_PIL_SSR_GENERIC=y -CONFIG_MSM_PIL_MSS_QDSP6V5=y -CONFIG_TRACER_PKT=y -CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y -CONFIG_MSM_MPM_OF=y -CONFIG_MSM_EVENT_TIMER=y -CONFIG_MSM_AVTIMER=y -# CONFIG_MSM_KERNEL_PROTECT is not set -CONFIG_QCOM_REMOTEQDSS=y -CONFIG_MSM_SERVICE_NOTIFIER=y -# CONFIG_MSM_QBT1000 is not set -CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y -CONFIG_MSM_RPM_LOG=y -CONFIG_MSM_RPM_STATS_LOG=y -CONFIG_SUBSYS_LAST_ERR_LOG=y -CONFIG_LAST_LOGS=y -CONFIG_SECURITY_STATUS=y -CONFIG_QSEE_IPC_IRQ_BRIDGE=y -CONFIG_WCD_DSP_GLINK=y -CONFIG_QCOM_SMCINVOKE=y -CONFIG_QCOM_EARLY_RANDOM=y -# CONFIG_QCOM_CX_IPEAK is not set -# CONFIG_MSM_CACHE_M4M_ERP64_PANIC_ON_CE is not set -# CONFIG_MSM_CACHE_M4M_ERP64_PANIC_ON_UE is not set -# 
CONFIG_QCOM_QDSS_BRIDGE is not set -# CONFIG_MFSE_QMI is not set -CONFIG_MEM_SHARE_QMI_SERVICE=y -# CONFIG_SUNXI_SRAM is not set -# CONFIG_SOC_TI is not set -CONFIG_PM_DEVFREQ=y - -# -# DEVFREQ Governors -# -CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y -CONFIG_DEVFREQ_GOV_PERFORMANCE=y -CONFIG_DEVFREQ_GOV_POWERSAVE=y -CONFIG_DEVFREQ_GOV_USERSPACE=y -CONFIG_DEVFREQ_GOV_QCOM_ADRENO_TZ=y -CONFIG_DEVFREQ_GOV_CPUFREQ=y -CONFIG_QCOM_BIMC_BWMON=y -CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON=y -# CONFIG_ARMBW_HWMON is not set -CONFIG_ARM_MEMLAT_MON=y -# CONFIG_QCOMCCI_HWMON is not set -# CONFIG_QCOM_M4M_HWMON is not set -CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y -# CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON is not set -CONFIG_DEVFREQ_GOV_SPDM_HYP=y -CONFIG_DEVFREQ_GOV_MEMLAT=y - -# -# DEVFREQ Drivers -# -# CONFIG_DEVFREQ_SIMPLE_DEV is not set -CONFIG_QCOM_DEVFREQ_DEVBW=y -CONFIG_SPDM_SCM=y -CONFIG_DEVFREQ_SPDM=y -# CONFIG_PM_DEVFREQ_EVENT is not set -CONFIG_EXTCON=y - -# -# Extcon Device Drivers -# -# CONFIG_EXTCON_ADC_JACK is not set -# CONFIG_EXTCON_GPIO is not set -# CONFIG_EXTCON_RT8973A is not set -# CONFIG_EXTCON_SM5502 is not set -# CONFIG_EXTCON_USB_GPIO is not set -# CONFIG_MEMORY is not set -CONFIG_IIO=y -# CONFIG_IIO_BUFFER is not set -# CONFIG_IIO_TRIGGER is not set - -# -# Accelerometers -# -# CONFIG_BMA180 is not set -# CONFIG_BMC150_ACCEL is not set -# CONFIG_IIO_ST_ACCEL_3AXIS is not set -# CONFIG_KXSD9 is not set -# CONFIG_KXCJK1013 is not set -# CONFIG_MMA8452 is not set -# CONFIG_MMA9551 is not set -# CONFIG_MMA9553 is not set -# CONFIG_MXC4005 is not set -# CONFIG_STK8312 is not set - -# -# Analog to digital converters -# -# CONFIG_AD7266 is not set -# CONFIG_AD7291 is not set -# CONFIG_AD7298 is not set -# CONFIG_AD7476 is not set -# CONFIG_AD7791 is not set -# CONFIG_AD7793 is not set -# CONFIG_AD7887 is not set -# CONFIG_AD7923 is not set -# CONFIG_AD799X is not set -# CONFIG_CC10001_ADC is not set -# CONFIG_HI8435 is not set -# CONFIG_MAX1027 is not set -# CONFIG_MAX1363 is not 
set -# CONFIG_MCP320X is not set -# CONFIG_MCP3422 is not set -# CONFIG_NAU7802 is not set -# CONFIG_QCOM_SPMI_IADC is not set -# CONFIG_QCOM_SPMI_VADC is not set -CONFIG_QCOM_RRADC=y -CONFIG_QCOM_TADC=y -# CONFIG_TI_ADC081C is not set -# CONFIG_TI_ADC128S052 is not set -# CONFIG_VF610_ADC is not set - -# -# Amplifiers -# -# CONFIG_AD8366 is not set - -# -# Chemical Sensors -# -# CONFIG_VZ89X is not set - -# -# Hid Sensor IIO Common -# - -# -# SSP Sensor Common -# -# CONFIG_IIO_SSP_SENSORHUB is not set - -# -# Digital to analog converters -# -# CONFIG_AD5064 is not set -# CONFIG_AD5360 is not set -# CONFIG_AD5380 is not set -# CONFIG_AD5421 is not set -# CONFIG_AD5446 is not set -# CONFIG_AD5449 is not set -# CONFIG_AD5504 is not set -# CONFIG_AD5624R_SPI is not set -# CONFIG_AD5686 is not set -# CONFIG_AD5755 is not set -# CONFIG_AD5764 is not set -# CONFIG_AD5791 is not set -# CONFIG_AD7303 is not set -# CONFIG_M62332 is not set -# CONFIG_MAX517 is not set -# CONFIG_MAX5821 is not set -# CONFIG_MCP4725 is not set -# CONFIG_MCP4922 is not set - -# -# Frequency Synthesizers DDS/PLL -# - -# -# Clock Generator/Distribution -# -# CONFIG_AD9523 is not set - -# -# Phase-Locked Loop (PLL) frequency synthesizers -# -# CONFIG_ADF4350 is not set - -# -# Digital gyroscope sensors -# -# CONFIG_ADIS16080 is not set -# CONFIG_ADIS16130 is not set -# CONFIG_ADIS16136 is not set -# CONFIG_ADIS16260 is not set -# CONFIG_ADXRS450 is not set -# CONFIG_BMG160 is not set -# CONFIG_IIO_ST_GYRO_3AXIS is not set -# CONFIG_ITG3200 is not set - -# -# Humidity sensors -# -# CONFIG_DHT11 is not set -# CONFIG_HDC100X is not set -# CONFIG_HTU21 is not set -# CONFIG_SI7005 is not set -# CONFIG_SI7020 is not set - -# -# Inertial measurement units -# -# CONFIG_ADIS16400 is not set -# CONFIG_ADIS16480 is not set -# CONFIG_KMX61 is not set -# CONFIG_INV_MPU6050_IIO is not set -# CONFIG_INV_MPU_IIO_I2C is not set -# CONFIG_INV_MPU_IIO_SPI is not set - -# -# Light sensors -# -# CONFIG_ACPI_ALS is not 
set -# CONFIG_ADJD_S311 is not set -# CONFIG_AL3320A is not set -# CONFIG_APDS9300 is not set -# CONFIG_APDS9960 is not set -# CONFIG_BH1750 is not set -# CONFIG_CM32181 is not set -# CONFIG_CM3232 is not set -# CONFIG_CM3323 is not set -# CONFIG_CM36651 is not set -# CONFIG_GP2AP020A00F is not set -# CONFIG_ISL29125 is not set -# CONFIG_JSA1212 is not set -# CONFIG_RPR0521 is not set -# CONFIG_LTR501 is not set -# CONFIG_OPT3001 is not set -# CONFIG_PA12203001 is not set -# CONFIG_STK3310 is not set -# CONFIG_TCS3414 is not set -# CONFIG_TCS3472 is not set -# CONFIG_SENSORS_TSL2563 is not set -# CONFIG_TSL4531 is not set -# CONFIG_US5182D is not set -# CONFIG_VCNL4000 is not set - -# -# Magnetometer sensors -# -# CONFIG_AK8975 is not set -# CONFIG_AK09911 is not set -# CONFIG_BMC150_MAGN is not set -# CONFIG_MAG3110 is not set -# CONFIG_MMC35240 is not set -# CONFIG_IIO_ST_MAGN_3AXIS is not set - -# -# Inclinometer sensors -# - -# -# Digital potentiometers -# -# CONFIG_MCP4531 is not set - -# -# Pressure sensors -# -# CONFIG_BMP280 is not set -# CONFIG_MPL115 is not set -# CONFIG_MPL3115 is not set -# CONFIG_MS5611 is not set -# CONFIG_MS5637 is not set -# CONFIG_IIO_ST_PRESS is not set -# CONFIG_T5403 is not set - -# -# Lightning sensors -# -# CONFIG_AS3935 is not set - -# -# Proximity sensors -# -# CONFIG_LIDAR_LITE_V2 is not set -# CONFIG_SX9500 is not set - -# -# Temperature sensors -# -# CONFIG_MLX90614 is not set -# CONFIG_TMP006 is not set -# CONFIG_TSYS01 is not set -# CONFIG_TSYS02D is not set -# CONFIG_NTB is not set -# CONFIG_VME_BUS is not set -CONFIG_PWM=y -CONFIG_PWM_SYSFS=y -# CONFIG_PWM_FSL_FTM is not set -# CONFIG_PWM_PCA9685 is not set -CONFIG_PWM_QPNP=y -CONFIG_IRQCHIP=y -CONFIG_ARM_GIC=y -CONFIG_ARM_GIC_V2M=y -CONFIG_ARM_GIC_V3=y -CONFIG_ARM_GIC_V3_ITS=y -CONFIG_ARM_GIC_V3_ACL=y -# CONFIG_ARM_GIC_V3_NO_ACCESS_CONTROL is not set -CONFIG_QCOM_SHOW_RESUME_IRQ=y -CONFIG_MSM_IRQ=y -# CONFIG_IPACK_BUS is not set -CONFIG_ARCH_HAS_RESET_CONTROLLER=y 
-CONFIG_RESET_CONTROLLER=y -# CONFIG_FMC is not set - -# -# PHY Subsystem -# -CONFIG_GENERIC_PHY=y -# CONFIG_PHY_PXA_28NM_HSIC is not set -# CONFIG_PHY_PXA_28NM_USB2 is not set -# CONFIG_BCM_KONA_USB2_PHY is not set -# CONFIG_PHY_QCOM_APQ8064_SATA is not set -# CONFIG_PHY_QCOM_IPQ806X_SATA is not set -# CONFIG_PHY_XGENE is not set -CONFIG_PHY_QCOM_UFS=y -# CONFIG_POWERCAP is not set -# CONFIG_MCB is not set - -# -# Performance monitor support -# -CONFIG_ARM_PMU=y -CONFIG_RAS=y -# CONFIG_THUNDERBOLT is not set - -# -# Android -# -CONFIG_ANDROID=y -CONFIG_ANDROID_BINDER_IPC=y -CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder" -# CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set -# CONFIG_LIBNVDIMM is not set -# CONFIG_NVMEM is not set -# CONFIG_STM is not set -# CONFIG_INTEL_TH is not set - -# -# FPGA Configuration Support -# -# CONFIG_FPGA is not set - -# -# Firmware Drivers -# -CONFIG_ARM_PSCI_FW=y -# CONFIG_FIRMWARE_MEMMAP is not set -# CONFIG_ISCSI_IBFT is not set -CONFIG_QCOM_SCM_64=y -CONFIG_HAVE_ARM_SMCCC=y -# CONFIG_MSM_TZ_LOG is not set -# CONFIG_BIF is not set -CONFIG_SENSORS_SSC=y -# CONFIG_TEE is not set - -# -# Firmware Drivers -# -CONFIG_ACPI=y -CONFIG_ACPI_GENERIC_GSI=y -CONFIG_ACPI_CCA_REQUIRED=y -# CONFIG_ACPI_DEBUGGER is not set -# CONFIG_ACPI_EC_DEBUGFS is not set -CONFIG_ACPI_BUTTON=y -CONFIG_ACPI_FAN=y -# CONFIG_ACPI_DOCK is not set -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_HOTPLUG_CPU=y -CONFIG_ACPI_THERMAL=y -# CONFIG_ACPI_CUSTOM_DSDT is not set -# CONFIG_ACPI_DEBUG is not set -# CONFIG_ACPI_PCI_SLOT is not set -CONFIG_ACPI_CONTAINER=y -# CONFIG_ACPI_HED is not set -# CONFIG_ACPI_CUSTOM_METHOD is not set -CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y -# CONFIG_PMIC_OPREGION is not set - -# -# File systems -# -CONFIG_DCACHE_WORD_ACCESS=y -# CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set -CONFIG_EXT4_FS=y -CONFIG_EXT4_USE_FOR_EXT2=y -# CONFIG_EXT4_FS_POSIX_ACL is not set -CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_ENCRYPTION=y 
-CONFIG_EXT4_FS_ENCRYPTION=y -CONFIG_EXT4_FS_ICE_ENCRYPTION=y -# CONFIG_EXT4_DEBUG is not set -CONFIG_JBD2=y -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=y -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -# CONFIG_XFS_FS is not set -# CONFIG_GFS2_FS is not set -# CONFIG_OCFS2_FS is not set -# CONFIG_BTRFS_FS is not set -# CONFIG_NILFS2_FS is not set -CONFIG_F2FS_FS=y -CONFIG_F2FS_STAT_FS=y -CONFIG_F2FS_FS_XATTR=y -CONFIG_F2FS_FS_POSIX_ACL=y -CONFIG_F2FS_FS_SECURITY=y -# CONFIG_F2FS_CHECK_FS is not set -# CONFIG_F2FS_FS_ENCRYPTION is not set -# CONFIG_F2FS_FAULT_INJECTION is not set -# CONFIG_FS_DAX is not set -CONFIG_FS_POSIX_ACL=y -CONFIG_FILE_LOCKING=y -# CONFIG_FS_ENCRYPTION is not set -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -# CONFIG_FANOTIFY is not set -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -# CONFIG_PRINT_QUOTA_WARNING is not set -# CONFIG_QUOTA_DEBUG is not set -CONFIG_QUOTA_TREE=y -# CONFIG_QFMT_V1 is not set -CONFIG_QFMT_V2=y -CONFIG_QUOTACTL=y -# CONFIG_AUTOFS4_FS is not set -CONFIG_FUSE_FS=y -# CONFIG_CUSE is not set -CONFIG_OVERLAY_FS=y - -# -# Caches -# -# CONFIG_FSCACHE is not set - -# -# CD-ROM/DVD Filesystems -# -# CONFIG_ISO9660_FS is not set -# CONFIG_UDF_FS is not set - -# -# DOS/FAT/NT Filesystems -# -CONFIG_FAT_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_VFAT_FS_NO_DUALNAMES=y -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" -CONFIG_SDFAT_FS=y -CONFIG_SDFAT_USE_FOR_EXFAT=y -CONFIG_SDFAT_DELAYED_META_DIRTY=y -CONFIG_SDFAT_SUPPORT_DIR_SYNC=y -CONFIG_SDFAT_DEFAULT_CODEPAGE=437 -CONFIG_SDFAT_DEFAULT_IOCHARSET="utf8" -# CONFIG_SDFAT_CHECK_RO_ATTR is not set -CONFIG_SDFAT_ALIGNED_MPAGE_WRITE=y -CONFIG_SDFAT_VIRTUAL_XATTR=y -CONFIG_SDFAT_VIRTUAL_XATTR_SELINUX_LABEL="u:object_r:vfat:s0" -CONFIG_SDFAT_DEBUG=y -# CONFIG_SDFAT_DBG_IOCTL is not set -CONFIG_SDFAT_DBG_MSG=y -# CONFIG_SDFAT_DBG_BUGON is not set -# CONFIG_SDFAT_DBG_WARNON is not set -CONFIG_SDFAT_STATISTICS=y 
-CONFIG_SDFAT_UEVENT=y -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -# CONFIG_PROC_KCORE is not set -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -# CONFIG_PROC_CHILDREN is not set -CONFIG_PROC_UID=y -CONFIG_KERNFS=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -# CONFIG_HUGETLBFS is not set -# CONFIG_HUGETLB_PAGE is not set -CONFIG_CONFIGFS_FS=y -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -CONFIG_ECRYPT_FS=y -CONFIG_ECRYPT_FS_MESSAGING=y -CONFIG_WTL_ENCRYPTION_FILTER=y -CONFIG_SDCARD_FS=y -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_LOGFS is not set -# CONFIG_CRAMFS is not set -# CONFIG_SQUASHFS is not set -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_QNX6FS_FS is not set -# CONFIG_ROMFS_FS is not set -CONFIG_PSTORE=y -CONFIG_PSTORE_CONSOLE=y -# CONFIG_PSTORE_PMSG is not set -CONFIG_PSTORE_RAM=y -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -CONFIG_NETWORK_FILESYSTEMS=y -# CONFIG_NFS_FS is not set -# CONFIG_NFSD is not set -# CONFIG_CEPH_FS is not set -# CONFIG_CIFS is not set -# CONFIG_NCP_FS is not set -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="iso8859-1" -CONFIG_NLS_CODEPAGE_437=y -# CONFIG_NLS_CODEPAGE_737 is not set -# CONFIG_NLS_CODEPAGE_775 is not set -# CONFIG_NLS_CODEPAGE_850 is not set -# CONFIG_NLS_CODEPAGE_852 is not set -# CONFIG_NLS_CODEPAGE_855 is not set -# CONFIG_NLS_CODEPAGE_857 is not set -# CONFIG_NLS_CODEPAGE_860 is not set -# CONFIG_NLS_CODEPAGE_861 is not set -# CONFIG_NLS_CODEPAGE_862 is not set -# CONFIG_NLS_CODEPAGE_863 is not set -# CONFIG_NLS_CODEPAGE_864 is not set -# CONFIG_NLS_CODEPAGE_865 is not set -# CONFIG_NLS_CODEPAGE_866 is not set -# 
CONFIG_NLS_CODEPAGE_869 is not set -# CONFIG_NLS_CODEPAGE_936 is not set -# CONFIG_NLS_CODEPAGE_950 is not set -# CONFIG_NLS_CODEPAGE_932 is not set -# CONFIG_NLS_CODEPAGE_949 is not set -# CONFIG_NLS_CODEPAGE_874 is not set -# CONFIG_NLS_ISO8859_8 is not set -# CONFIG_NLS_CODEPAGE_1250 is not set -# CONFIG_NLS_CODEPAGE_1251 is not set -# CONFIG_NLS_ASCII is not set -CONFIG_NLS_ISO8859_1=y -# CONFIG_NLS_ISO8859_2 is not set -# CONFIG_NLS_ISO8859_3 is not set -# CONFIG_NLS_ISO8859_4 is not set -# CONFIG_NLS_ISO8859_5 is not set -# CONFIG_NLS_ISO8859_6 is not set -# CONFIG_NLS_ISO8859_7 is not set -# CONFIG_NLS_ISO8859_9 is not set -# CONFIG_NLS_ISO8859_13 is not set -# CONFIG_NLS_ISO8859_14 is not set -# CONFIG_NLS_ISO8859_15 is not set -# CONFIG_NLS_KOI8_R is not set -# CONFIG_NLS_KOI8_U is not set -# CONFIG_NLS_MAC_ROMAN is not set -# CONFIG_NLS_MAC_CELTIC is not set -# CONFIG_NLS_MAC_CENTEURO is not set -# CONFIG_NLS_MAC_CROATIAN is not set -# CONFIG_NLS_MAC_CYRILLIC is not set -# CONFIG_NLS_MAC_GAELIC is not set -# CONFIG_NLS_MAC_GREEK is not set -# CONFIG_NLS_MAC_ICELAND is not set -# CONFIG_NLS_MAC_INUIT is not set -# CONFIG_NLS_MAC_ROMANIAN is not set -# CONFIG_NLS_MAC_TURKISH is not set -CONFIG_NLS_UTF8=y -# CONFIG_DLM is not set -# CONFIG_FILE_TABLE_DEBUG is not set -# CONFIG_VIRTUALIZATION is not set - -# -# Kernel hacking -# - -# -# printk and dmesg options -# -CONFIG_PRINTK_TIME=y -CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 -# CONFIG_BOOT_PRINTK_DELAY is not set -CONFIG_DYNAMIC_DEBUG=y - -# -# Compile-time checks and compiler options -# -CONFIG_DEBUG_INFO=y -# CONFIG_DEBUG_INFO_REDUCED is not set -# CONFIG_DEBUG_INFO_SPLIT is not set -# CONFIG_DEBUG_INFO_DWARF4 is not set -# CONFIG_GDB_SCRIPTS is not set -CONFIG_ENABLE_WARN_DEPRECATED=y -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=2048 -# CONFIG_STRIP_ASM_SYMS is not set -# CONFIG_READABLE_ASM is not set -# CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_PAGE_OWNER is not set -CONFIG_DEBUG_FS=y -# 
CONFIG_HEADERS_CHECK is not set -# CONFIG_DEBUG_SECTION_MISMATCH is not set -CONFIG_SECTION_MISMATCH_WARN_ONLY=y -CONFIG_ARCH_WANT_FRAME_POINTERS=y -CONFIG_FRAME_POINTER=y -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x0 -CONFIG_DEBUG_KERNEL=y - -# -# Memory Debugging -# -# CONFIG_PAGE_EXTENSION is not set -# CONFIG_DEBUG_PAGEALLOC is not set -# CONFIG_PAGE_POISONING is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_SLUB_STATS is not set -CONFIG_HAVE_DEBUG_KMEMLEAK=y -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_MEMORY_INIT is not set -# CONFIG_DEBUG_PER_CPU_MAPS is not set -CONFIG_HAVE_ARCH_KASAN=y -# CONFIG_KASAN is not set -# CONFIG_DEBUG_SHIRQ is not set - -# -# Debug Lockups and Hangs -# -# CONFIG_LOCKUP_DETECTOR is not set -# CONFIG_DETECT_HUNG_TASK is not set -# CONFIG_WQ_WATCHDOG is not set -# CONFIG_PANIC_ON_OOPS is not set -CONFIG_PANIC_ON_OOPS_VALUE=0 -CONFIG_PANIC_TIMEOUT=5 -CONFIG_SCHED_DEBUG=y -CONFIG_SCHED_INFO=y -# CONFIG_PANIC_ON_SCHED_BUG is not set -# CONFIG_PANIC_ON_RT_THROTTLING is not set -CONFIG_SYSRQ_SCHED_DEBUG=y -CONFIG_SCHEDSTATS=y -# CONFIG_SCHED_STACK_END_CHECK is not set -# CONFIG_DEBUG_TIMEKEEPING is not set -# CONFIG_DEBUG_PREEMPT is not set - -# -# Lock Debugging (spinlocks, mutexes, etc...) 
-# -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_LOCK_TORTURE_TEST is not set -CONFIG_STACKTRACE=y -# CONFIG_DEBUG_KOBJECT is not set -CONFIG_HAVE_DEBUG_BUGVERBOSE=y -CONFIG_DEBUG_BUGVERBOSE=y -# CONFIG_DEBUG_LIST is not set -# CONFIG_DEBUG_PI_LIST is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_CREDENTIALS is not set - -# -# RCU Debugging -# -# CONFIG_PROVE_RCU is not set -# CONFIG_SPARSE_RCU_POINTER is not set -# CONFIG_TORTURE_TEST is not set -# CONFIG_RCU_TORTURE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=21 -# CONFIG_RCU_STALL_WATCHDOG_BITE is not set -# CONFIG_RCU_TRACE is not set -# CONFIG_RCU_EQS_DEBUG is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -# CONFIG_FAULT_INJECTION is not set -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_IPC_LOGGING=y -CONFIG_QCOM_RTB=y -CONFIG_QCOM_RTB_SEPARATE_CPUS=y -CONFIG_TRACING=y -# CONFIG_TRACE_PRINTK is not set -CONFIG_GENERIC_TRACER=y -CONFIG_DISABLE_TRACE_PRINTK=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -# CONFIG_FUNCTION_TRACER is not set -# CONFIG_PREEMPTIRQ_EVENTS is not set -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_PREEMPT_TRACER is not set -# CONFIG_SCHED_TRACER is not set -# CONFIG_FTRACE_SYSCALLS is not set -# CONFIG_TRACER_SNAPSHOT is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -# 
CONFIG_PROFILE_ALL_BRANCHES is not set -# CONFIG_STACK_TRACER is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_PROBE_EVENTS is not set -CONFIG_CPU_FREQ_SWITCH_PROFILER=y -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -# CONFIG_RING_BUFFER_BENCHMARK is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set -# CONFIG_TRACE_ENUM_MAP_FILE is not set -CONFIG_TRACING_EVENTS_GPIO=y - -# -# Runtime Testing -# -# CONFIG_LKDTM is not set -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_RBTREE_TEST is not set -# CONFIG_INTERVAL_TREE_TEST is not set -# CONFIG_PERCPU_TEST is not set -# CONFIG_ATOMIC64_SELFTEST is not set -# CONFIG_TEST_HEXDUMP is not set -# CONFIG_TEST_STRING_HELPERS is not set -# CONFIG_TEST_KSTRTOX is not set -# CONFIG_TEST_PRINTF is not set -# CONFIG_TEST_RHASHTABLE is not set -# CONFIG_TEST_HASH is not set -# CONFIG_DMA_API_DEBUG is not set -# CONFIG_TEST_LKM is not set -# CONFIG_TEST_USER_COPY is not set -# CONFIG_TEST_BPF is not set -# CONFIG_TEST_FIRMWARE is not set -# CONFIG_TEST_UDELAY is not set -# CONFIG_MEMTEST is not set -# CONFIG_TEST_STATIC_KEYS is not set -# CONFIG_PANIC_ON_DATA_CORRUPTION is not set -# CONFIG_BUG_ON_DATA_CORRUPTION is not set -# CONFIG_CC_WERROR is not set -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KGDB=y -# CONFIG_KGDB is not set -CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y -# CONFIG_UBSAN is not set -# CONFIG_ARM64_PTDUMP is not set -CONFIG_STRICT_DEVMEM=y -# CONFIG_PID_IN_CONTEXTIDR is not set -# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set -# CONFIG_DEBUG_SET_MODULE_RONX is not set -CONFIG_DEBUG_RODATA=y -CONFIG_DEBUG_ALIGN_RODATA=y -# CONFIG_FORCE_PAGES is not set -# CONFIG_FREE_PAGES_RDONLY is not set -# CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE is not set -CONFIG_CORESIGHT=y -CONFIG_CORESIGHT_EVENT=y -CONFIG_CORESIGHT_CSR=y -CONFIG_CORESIGHT_LINKS_AND_SINKS=y -CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y -# CONFIG_CORESIGHT_SINK_TPIU is not set -# 
CONFIG_CORESIGHT_SINK_ETBV10 is not set -# CONFIG_CORESIGHT_SOURCE_ETM4X is not set -# CONFIG_CORESIGHT_REMOTE_ETM is not set -CONFIG_CORESIGHT_QCOM_REPLICATOR=y -CONFIG_CORESIGHT_STM=y -CONFIG_CORESIGHT_HWEVENT=y -CONFIG_CORESIGHT_CTI=y -# CONFIG_CORESIGHT_CTI_SAVE_DISABLE is not set -CONFIG_CORESIGHT_TPDA=y -CONFIG_CORESIGHT_TPDM=y -# CONFIG_CORESIGHT_TPDM_DEFAULT_ENABLE is not set -CONFIG_CORESIGHT_QPDI=y -CONFIG_CORESIGHT_SOURCE_DUMMY=y - -# -# Security options -# -CONFIG_KEYS=y -# CONFIG_PERSISTENT_KEYRINGS is not set -# CONFIG_BIG_KEYS is not set -CONFIG_ENCRYPTED_KEYS=y - -# -# Qualcomm Technologies, Inc Per File Encryption security device drivers -# -# CONFIG_PFT is not set -CONFIG_PFK=y -# CONFIG_SECURITY_DMESG_RESTRICT is not set -CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y -CONFIG_SECURITY=y -# CONFIG_SECURITYFS is not set -CONFIG_SECURITY_NETWORK=y -# CONFIG_SECURITY_NETWORK_XFRM is not set -# CONFIG_SECURITY_PATH is not set -CONFIG_LSM_MMAP_MIN_ADDR=32768 -CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y -CONFIG_HAVE_ARCH_HARDENED_USERCOPY=y -CONFIG_HARDENED_USERCOPY=y -# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set -CONFIG_SECURITY_SELINUX=y -# CONFIG_SECURITY_SELINUX_BOOTPARAM is not set -# CONFIG_SECURITY_SELINUX_DISABLE is not set -CONFIG_SECURITY_SELINUX_DEVELOP=y -CONFIG_SECURITY_SELINUX_AVC_STATS=y -CONFIG_SECURITY_SELINUX_AVC_EXTRA_INFO=y -CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=0 -# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set -# CONFIG_SECURITY_SELINUX_TRAP is not set -CONFIG_SECURITY_SMACK=y -# CONFIG_SECURITY_SMACK_BRINGUP is not set -# CONFIG_SECURITY_SMACK_NETFILTER is not set -# CONFIG_SECURITY_TOMOYO is not set -# CONFIG_SECURITY_APPARMOR is not set -# CONFIG_SECURITY_YAMA is not set -CONFIG_INTEGRITY=y -# CONFIG_INTEGRITY_SIGNATURE is not set -CONFIG_INTEGRITY_AUDIT=y -# CONFIG_IMA is not set -# CONFIG_EVM is not set -CONFIG_DEFAULT_SECURITY_SELINUX=y -# CONFIG_DEFAULT_SECURITY_SMACK is not set -# CONFIG_DEFAULT_SECURITY_DAC is not 
set -CONFIG_DEFAULT_SECURITY="selinux" -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=y -CONFIG_CRYPTO_PCOMP2=y -CONFIG_CRYPTO_AKCIPHER2=y -CONFIG_CRYPTO_AKCIPHER=y -# CONFIG_CRYPTO_RSA is not set -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -# CONFIG_CRYPTO_USER is not set -CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y -CONFIG_CRYPTO_GF128MUL=y -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_NULL2=y -# CONFIG_CRYPTO_PCRYPT is not set -CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_CRYPTD=y -# CONFIG_CRYPTO_MCRYPTD is not set -CONFIG_CRYPTO_AUTHENC=y -# CONFIG_CRYPTO_TEST is not set -CONFIG_CRYPTO_ABLK_HELPER=y - -# -# Authenticated Encryption with Associated Data -# -# CONFIG_CRYPTO_CCM is not set -CONFIG_CRYPTO_GCM=y -# CONFIG_CRYPTO_CHACHA20POLY1305 is not set -CONFIG_CRYPTO_SEQIV=y -CONFIG_CRYPTO_ECHAINIV=y - -# -# Block modes -# -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_HEH=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_CTS=y -CONFIG_CRYPTO_ECB=y -# CONFIG_CRYPTO_LRW is not set -# CONFIG_CRYPTO_PCBC is not set -CONFIG_CRYPTO_XTS=y -# CONFIG_CRYPTO_KEYWRAP is not set - -# -# Hash modes -# -CONFIG_CRYPTO_CMAC=y -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=y -# CONFIG_CRYPTO_VMAC is not set - -# -# Digest -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32=y -# CONFIG_CRYPTO_CRCT10DIF is not set -CONFIG_CRYPTO_GHASH=y -# CONFIG_CRYPTO_POLY1305 is not set -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_MD5=y -# CONFIG_CRYPTO_MICHAEL_MIC is not set -# CONFIG_CRYPTO_RMD128 is not set -# CONFIG_CRYPTO_RMD160 is not set -# CONFIG_CRYPTO_RMD256 is not set -# CONFIG_CRYPTO_RMD320 is not set -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -# CONFIG_CRYPTO_TGR192 is not set -# CONFIG_CRYPTO_WP512 is not set - -# -# Ciphers -# 
-CONFIG_CRYPTO_AES=y -# CONFIG_CRYPTO_ANUBIS is not set -CONFIG_CRYPTO_ARC4=y -# CONFIG_CRYPTO_BLOWFISH is not set -# CONFIG_CRYPTO_CAMELLIA is not set -# CONFIG_CRYPTO_CAST5 is not set -# CONFIG_CRYPTO_CAST6 is not set -CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_FCRYPT is not set -# CONFIG_CRYPTO_KHAZAD is not set -# CONFIG_CRYPTO_SALSA20 is not set -# CONFIG_CRYPTO_CHACHA20 is not set -# CONFIG_CRYPTO_SEED is not set -# CONFIG_CRYPTO_SERPENT is not set -# CONFIG_CRYPTO_TEA is not set -CONFIG_CRYPTO_TWOFISH=y -CONFIG_CRYPTO_TWOFISH_COMMON=y - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=y -# CONFIG_CRYPTO_ZLIB is not set -CONFIG_CRYPTO_LZO=y -# CONFIG_CRYPTO_842 is not set -CONFIG_CRYPTO_LZ4=y -# CONFIG_CRYPTO_LZ4HC is not set -# CONFIG_CRYPTO_ZSTD is not set - -# -# Random Number Generation -# -CONFIG_CRYPTO_ANSI_CPRNG=y -CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_DRBG_HMAC=y -# CONFIG_CRYPTO_DRBG_HASH is not set -# CONFIG_CRYPTO_DRBG_CTR is not set -CONFIG_CRYPTO_DRBG=y -CONFIG_CRYPTO_JITTERENTROPY=y -# CONFIG_CRYPTO_USER_API_HASH is not set -# CONFIG_CRYPTO_USER_API_SKCIPHER is not set -# CONFIG_CRYPTO_USER_API_RNG is not set -# CONFIG_CRYPTO_USER_API_AEAD is not set -CONFIG_CRYPTO_HASH_INFO=y -CONFIG_CRYPTO_HW=y -CONFIG_CRYPTO_DEV_QCE50=y -# CONFIG_FIPS_ENABLE is not set -CONFIG_CRYPTO_DEV_QCRYPTO=y -CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y -CONFIG_CRYPTO_DEV_QCEDEV=y -CONFIG_CRYPTO_DEV_OTA_CRYPTO=y -# CONFIG_CRYPTO_DEV_CCP is not set -# CONFIG_CRYPTO_DEV_QCE is not set -CONFIG_CRYPTO_DEV_QCOM_ICE=y -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_PUBLIC_KEY_ALGO_RSA=y -CONFIG_X509_CERTIFICATE_PARSER=y -CONFIG_PKCS7_MESSAGE_PARSER=y -# CONFIG_PKCS7_TEST_KEY is not set -# CONFIG_SIGNED_PE_FILE_VERIFICATION is not set - -# -# Certificates for signature checking -# -CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" -CONFIG_SYSTEM_TRUSTED_KEYRING=y -CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem" -CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_SHA1_ARM64_CE=y 
-CONFIG_CRYPTO_SHA2_ARM64_CE=y -CONFIG_CRYPTO_GHASH_ARM64_CE=y -CONFIG_CRYPTO_POLY_HASH_ARM64_CE=y -CONFIG_CRYPTO_AES_ARM64_CE=y -CONFIG_CRYPTO_AES_ARM64_CE_CCM=y -CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y -CONFIG_CRYPTO_CRC32_ARM64=y -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_BITREVERSE=y -CONFIG_HAVE_ARCH_BITREVERSE=y -CONFIG_RATIONAL=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_NET_UTILS=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_GENERIC_IO=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_CRC_CCITT=y -CONFIG_CRC16=y -# CONFIG_CRC_T10DIF is not set -# CONFIG_CRC_ITU_T is not set -CONFIG_CRC32=y -# CONFIG_CRC32_SELFTEST is not set -CONFIG_CRC32_SLICEBY8=y -# CONFIG_CRC32_SLICEBY4 is not set -# CONFIG_CRC32_SARWATE is not set -# CONFIG_CRC32_BIT is not set -# CONFIG_CRC7 is not set -CONFIG_LIBCRC32C=y -# CONFIG_CRC8 is not set -CONFIG_AUDIT_GENERIC=y -CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y -CONFIG_AUDIT_COMPAT_GENERIC=y -# CONFIG_RANDOM32_SELFTEST is not set -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_LZ4_COMPRESS=y -CONFIG_LZ4_DECOMPRESS=y -# CONFIG_XZ_DEC is not set -# CONFIG_XZ_DEC_BCJ is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_REED_SOLOMON=y -CONFIG_REED_SOLOMON_ENC8=y -CONFIG_REED_SOLOMON_DEC8=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=y -CONFIG_TEXTSEARCH_BM=y -CONFIG_TEXTSEARCH_FSM=y -CONFIG_ASSOCIATIVE_ARRAY=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT_MAP=y -CONFIG_HAS_DMA=y -CONFIG_CPU_RMAP=y -CONFIG_DQL=y -CONFIG_NLATTR=y -CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y -CONFIG_CLZ_TAB=y -# CONFIG_CORDIC is not set -# CONFIG_DDR is not set -CONFIG_MPILIB=y -CONFIG_LIBFDT=y -CONFIG_OID_REGISTRY=y -# CONFIG_SG_SPLIT is not set -CONFIG_ARCH_HAS_SG_CHAIN=y -CONFIG_QMI_ENCDEC=y -# CONFIG_QMI_ENCDEC_DEBUG is not set diff --git 
a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig index 8b062739ae0d..3f0cb79e1d1e 100644 --- a/arch/arm64/configs/msmcortex-perf_defconfig +++ b/arch/arm64/configs/msmcortex-perf_defconfig @@ -26,6 +26,7 @@ CONFIG_SCHED_HMP=y CONFIG_SCHED_HMP_CSTATE_AWARE=y CONFIG_SCHED_CORE_CTL=y CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_SCHED_AUTOGROUP=y CONFIG_SCHED_TUNE=y @@ -449,7 +450,6 @@ CONFIG_HID_ELECOM=y CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y -CONFIG_HID_NINTENDO=y CONFIG_HID_PLANTRONICS=y CONFIG_HID_SONY=y CONFIG_USB=y @@ -501,7 +501,6 @@ CONFIG_LEDS_QPNP=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_WLED=y CONFIG_LEDS_SYSCON=y -CONFIG_LEDS_QPNP_HAPTICS=y CONFIG_LEDS_TRIGGERS=y CONFIG_SWITCH=y CONFIG_RTC_CLASS=y diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig index a37953ad33c5..51d31bc72348 100644 --- a/arch/arm64/configs/msmcortex_defconfig +++ b/arch/arm64/configs/msmcortex_defconfig @@ -25,6 +25,7 @@ CONFIG_SCHED_HMP=y CONFIG_SCHED_HMP_CSTATE_AWARE=y CONFIG_SCHED_CORE_CTL=y CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_SCHED_AUTOGROUP=y CONFIG_SCHED_TUNE=y @@ -456,7 +457,6 @@ CONFIG_HID_ELECOM=y CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y -CONFIG_HID_NINTENDO=y CONFIG_HID_PLANTRONICS=y CONFIG_HID_SONY=y CONFIG_USB=y @@ -509,7 +509,6 @@ CONFIG_LEDS_QPNP=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_WLED=y CONFIG_LEDS_SYSCON=y -CONFIG_LEDS_QPNP_HAPTICS=y CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_LEDS_TRIGGER_CPU=y diff --git a/arch/arm64/configs/sdm660-perf_defconfig b/arch/arm64/configs/sdm660-perf_defconfig index e25dcaf77552..274519f7055a 100644 --- a/arch/arm64/configs/sdm660-perf_defconfig +++ b/arch/arm64/configs/sdm660-perf_defconfig @@ -26,6 +26,7 @@ CONFIG_SCHED_HMP=y CONFIG_SCHED_HMP_CSTATE_AWARE=y 
CONFIG_SCHED_CORE_CTL=y CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_SCHED_AUTOGROUP=y CONFIG_SCHED_TUNE=y @@ -452,7 +453,6 @@ CONFIG_HID_ELECOM=y CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y -CONFIG_HID_NINTENDO=y CONFIG_HID_PLANTRONICS=y CONFIG_HID_SONY=y CONFIG_USB=y @@ -507,7 +507,6 @@ CONFIG_LEDS_QPNP=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_WLED=y CONFIG_LEDS_SYSCON=y -CONFIG_LEDS_QPNP_HAPTICS=y CONFIG_LEDS_TRIGGERS=y CONFIG_SWITCH=y CONFIG_RTC_CLASS=y diff --git a/arch/arm64/configs/sdm660_defconfig b/arch/arm64/configs/sdm660_defconfig index a6d430cbc8e1..8df86944dce9 100644 --- a/arch/arm64/configs/sdm660_defconfig +++ b/arch/arm64/configs/sdm660_defconfig @@ -25,6 +25,7 @@ CONFIG_SCHED_HMP=y CONFIG_SCHED_HMP_CSTATE_AWARE=y CONFIG_SCHED_CORE_CTL=y CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_SCHED_AUTOGROUP=y CONFIG_SCHED_TUNE=y @@ -457,7 +458,6 @@ CONFIG_HID_ELECOM=y CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y -CONFIG_HID_NINTENDO=y CONFIG_HID_PLANTRONICS=y CONFIG_HID_SONY=y CONFIG_USB=y @@ -513,7 +513,6 @@ CONFIG_LEDS_QPNP=y CONFIG_LEDS_QPNP_FLASH_V2=y CONFIG_LEDS_QPNP_WLED=y CONFIG_LEDS_SYSCON=y -CONFIG_LEDS_QPNP_HAPTICS=y CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_LEDS_TRIGGER_CPU=y diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index dda13e308385..55101bd86b98 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -171,7 +171,7 @@ alternative_endif .macro user_alt, label, oldinstr, newinstr, cond 9999: alternative_insn "\oldinstr", "\newinstr", \cond - _asm_extable 9999b, \label + _ASM_EXTABLE 9999b, \label .endm /* diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 7dcfd83ff5e8..4fdf307f92bf 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h 
@@ -189,25 +189,22 @@ lr .req x30 // link register /* * Pseudo-ops for PC-relative adr/ldr/str , where - * is within the range +/- 4 GB of the PC when running - * in core kernel context. In module context, a movz/movk sequence - * is used, since modules may be loaded far away from the kernel - * when KASLR is in effect. + * is within the range +/- 4 GB of the PC. */ /* * @dst: destination register (64 bit wide) * @sym: name of the symbol + * @tmp: optional scratch register to be used if == sp, which + * is not allowed in an adrp instruction */ - .macro adr_l, dst, sym -#ifndef MODULE + .macro adr_l, dst, sym, tmp= + .ifb \tmp adrp \dst, \sym add \dst, \dst, :lo12:\sym -#else - movz \dst, #:abs_g3:\sym - movk \dst, #:abs_g2_nc:\sym - movk \dst, #:abs_g1_nc:\sym - movk \dst, #:abs_g0_nc:\sym -#endif + .else + adrp \tmp, \sym + add \dst, \tmp, :lo12:\sym + .endif .endm /* @@ -218,7 +215,6 @@ lr .req x30 // link register * the address */ .macro ldr_l, dst, sym, tmp= -#ifndef MODULE .ifb \tmp adrp \dst, \sym ldr \dst, [\dst, :lo12:\sym] @@ -226,15 +222,6 @@ lr .req x30 // link register adrp \tmp, \sym ldr \dst, [\tmp, :lo12:\sym] .endif -#else - .ifb \tmp - adr_l \dst, \sym - ldr \dst, [\dst] - .else - adr_l \tmp, \sym - ldr \dst, [\tmp] - .endif -#endif .endm /* @@ -244,13 +231,8 @@ lr .req x30 // link register * while needs to be preserved. 
*/ .macro str_l, src, sym, tmp -#ifndef MODULE adrp \tmp, \sym str \src, [\tmp, :lo12:\sym] -#else - adr_l \tmp, \sym - str \src, [\tmp] -#endif .endm /* diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 7d2a15a0f625..801a16dbbdf6 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -23,7 +23,7 @@ struct stackframe { unsigned long sp; unsigned long pc; #ifdef CONFIG_FUNCTION_GRAPH_TRACER - int graph; + unsigned int graph; #endif }; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 2a2bf5231f6a..50033b91ce4f 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -693,7 +693,7 @@ static int __init parse_kpti(char *str) __kpti_forced = enabled ? 1 : -1; return 0; } -early_param("kpti", parse_kpti); +__setup("kpti=", parse_kpti); #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ static const struct arm64_cpu_capabilities arm64_features[] = { diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index acbc65539682..310f2f463cd4 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -13,7 +13,6 @@ #include #include -#include #include #include #include @@ -43,7 +42,7 @@ static __init u64 get_kaslr_seed(void *fdt) return ret; } -static __init const u8 *kaslr_get_cmdline(void *fdt) +static __init const u8 *get_cmdline(void *fdt) { static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; @@ -87,7 +86,6 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset) * we end up running with module randomization disabled. */ module_alloc_base = (u64)_etext - MODULES_VSIZE; - __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); /* * Try to map the FDT early. If this fails, we simply bail, @@ -110,7 +108,7 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset) * Check if 'nokaslr' appears on the command line, and * return 0 if that is the case. 
*/ - cmdline = kaslr_get_cmdline(fdt); + cmdline = get_cmdline(fdt); str = strstr(cmdline, "nokaslr"); if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) return 0; @@ -179,8 +177,5 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset) module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; module_alloc_base &= PAGE_MASK; - __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); - __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed)); - return offset; } diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 0edcd34b45d2..2ac2abe8a494 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -75,11 +75,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame) #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (tsk && tsk->ret_stack && (frame->pc == (unsigned long)return_to_handler)) { - if (WARN_ON_ONCE(frame->graph == -1)) - return -EINVAL; - if (frame->graph < -1) - frame->graph += FTRACE_NOTRACE_DEPTH; - /* * This is a case where function graph tracer has * modified a return address (LR) in a stack frame diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c index 5d9076e86200..59779699a1a4 100644 --- a/arch/arm64/kernel/time.c +++ b/arch/arm64/kernel/time.c @@ -53,7 +53,7 @@ unsigned long profile_pc(struct pt_regs *regs) frame.sp = regs->sp; frame.pc = regs->pc; #ifdef CONFIG_FUNCTION_GRAPH_TRACER - frame.graph = current->curr_ret_stack; + frame.graph = -1; /* no task info */ #endif do { int ret = unwind_frame(NULL, &frame); diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 33ed8a687f46..d0e5fe5fbf22 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -19,14 +19,36 @@ #include #include #include +#include #include #include #include #include +/* + * cpu power table + * This per cpu data structure describes the relative capacity of each core. 
+ * On a heteregenous system, cores don't have the same computation capacity + * and we reflect that difference in the cpu_power field so the scheduler can + * take this difference into account during load balance. A per cpu structure + * is preferred because each CPU updates its own cpu_power field during the + * load balance except for idle cores. One idle core is selected to run the + * rebalance_domains for all idle cores and the cpu_power can be updated + * during this sequence. + */ static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; +unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu) +{ + return per_cpu(cpu_scale, cpu); +} + +static void set_power_scale(unsigned int cpu, unsigned long power) +{ + per_cpu(cpu_scale, cpu) = power; +} + unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu) { #ifdef CONFIG_CPU_FREQ @@ -181,6 +203,46 @@ static int __init parse_cluster(struct device_node *cluster, int depth) return 0; } +struct cpu_efficiency { + const char *compatible; + unsigned long efficiency; +}; + +/* + * Table of relative efficiency of each processors + * The efficiency value must fit in 20bit and the final + * cpu_scale value must be in the range + * 0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2 + * in order to return at most 1 when DIV_ROUND_CLOSEST + * is used to compute the capacity of a CPU. + * Processors that are not defined in the table, + * use the default SCHED_CAPACITY_SCALE value for cpu_scale. 
+ */ +static const struct cpu_efficiency table_efficiency[] = { + { NULL, }, +}; + +static unsigned long *__cpu_capacity; +#define cpu_capacity(cpu) __cpu_capacity[cpu] + +static unsigned long middle_capacity = 1; + +static DEFINE_PER_CPU(unsigned long, cpu_efficiency) = SCHED_CAPACITY_SCALE; + +unsigned long arch_get_cpu_efficiency(int cpu) +{ + return per_cpu(cpu_efficiency, cpu); +} +EXPORT_SYMBOL(arch_get_cpu_efficiency); + +/* + * Iterate all CPUs' descriptor in DT and compute the efficiency + * (as per table_efficiency). Also calculate a middle efficiency + * as close as possible to (max{eff_i} - min{eff_i}) / 2 + * This is later used to scale the cpu_power field such that an + * 'average' CPU is of middle power. Also see the comments near + * table_efficiency[] and update_cpu_power(). + */ static int __init parse_dt_topology(void) { struct device_node *cn, *map; @@ -220,6 +282,107 @@ out: return ret; } +static void __init parse_dt_cpu_power(void) +{ + const struct cpu_efficiency *cpu_eff; + struct device_node *cn; + unsigned long min_capacity = ULONG_MAX; + unsigned long max_capacity = 0; + unsigned long capacity = 0; + int cpu; + + __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity), + GFP_NOWAIT); + + for_each_possible_cpu(cpu) { + const u32 *rate; + int len; + u32 efficiency; + + /* Too early to use cpu->of_node */ + cn = of_get_cpu_node(cpu, NULL); + if (!cn) { + pr_err("Missing device node for CPU %d\n", cpu); + continue; + } + + /* + * The CPU efficiency value passed from the device tree + * overrides the value defined in the table_efficiency[] + */ + if (of_property_read_u32(cn, "efficiency", &efficiency) < 0) { + + for (cpu_eff = table_efficiency; + cpu_eff->compatible; cpu_eff++) + + if (of_device_is_compatible(cn, + cpu_eff->compatible)) + break; + + if (cpu_eff->compatible == NULL) { + pr_warn("%s: Unknown CPU type\n", + cn->full_name); + continue; + } + + efficiency = cpu_eff->efficiency; + } + + per_cpu(cpu_efficiency, cpu) = 
efficiency; + + rate = of_get_property(cn, "clock-frequency", &len); + if (!rate || len != 4) { + pr_err("%s: Missing clock-frequency property\n", + cn->full_name); + continue; + } + + capacity = ((be32_to_cpup(rate)) >> 20) * efficiency; + + /* Save min capacity of the system */ + if (capacity < min_capacity) + min_capacity = capacity; + + /* Save max capacity of the system */ + if (capacity > max_capacity) + max_capacity = capacity; + + cpu_capacity(cpu) = capacity; + } + + /* If min and max capacities are equal we bypass the update of the + * cpu_scale because all CPUs have the same capacity. Otherwise, we + * compute a middle_capacity factor that will ensure that the capacity + * of an 'average' CPU of the system will be as close as possible to + * SCHED_CAPACITY_SCALE, which is the default value, but with the + * constraint explained near table_efficiency[]. + */ + if (min_capacity == max_capacity) + return; + else if (4 * max_capacity < (3 * (max_capacity + min_capacity))) + middle_capacity = (min_capacity + max_capacity) + >> (SCHED_CAPACITY_SHIFT+1); + else + middle_capacity = ((max_capacity / 3) + >> (SCHED_CAPACITY_SHIFT-1)) + 1; +} + +/* + * Look for a customed capacity of a CPU in the cpu_topo_data table during the + * boot. The update of all CPUs is in O(n^2) for heteregeneous system but the + * function returns directly for SMP system. 
+ */ +static void update_cpu_power(unsigned int cpu) +{ + if (!cpu_capacity(cpu)) + return; + + set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity); + + pr_info("CPU%u: update cpu_power %lu\n", + cpu, arch_scale_freq_power(NULL, cpu)); +} + /* * cpu topology table */ @@ -281,7 +444,7 @@ static void update_cpu_capacity(unsigned int cpu) { unsigned long capacity = SCHED_CAPACITY_SCALE; - if (cpu_core_energy(cpu)) { + if (sched_energy_aware && cpu_core_energy(cpu)) { int max_cap_idx = cpu_core_energy(cpu)->nr_cap_states - 1; capacity = cpu_core_energy(cpu)->cap_states[max_cap_idx].cap; } @@ -353,6 +516,7 @@ void store_cpu_topology(unsigned int cpuid) topology_populated: update_siblings_masks(cpuid); + update_cpu_power(cpuid); update_cpu_capacity(cpuid); } @@ -374,6 +538,14 @@ static void __init reset_cpu_topology(void) } } +static void __init reset_cpu_power(void) +{ + unsigned int cpu; + + for_each_possible_cpu(cpu) + set_power_scale(cpu, SCHED_CAPACITY_SCALE); +} + void __init init_cpu_topology(void) { int cpu; @@ -392,5 +564,7 @@ void __init init_cpu_topology(void) update_siblings_masks(cpu); } + reset_cpu_power(); + parse_dt_cpu_power(); init_sched_energy_costs(); } diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 416aea2c6719..cb0fdac03fe9 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -495,6 +495,14 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) } #endif + if (show_unhandled_signals_ratelimited()) { + pr_info("%s[%d]: syscall %d\n", current->comm, + task_pid_nr(current), (int)regs->syscallno); + dump_instr("", regs); + if (user_mode(regs)) + __show_regs(regs); + } + return sys_ni_syscall(); } diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S index 3dc1198b5ec9..9de0ffc369c5 100644 --- a/arch/arm64/kernel/vdso/vdso.lds.S +++ b/arch/arm64/kernel/vdso/vdso.lds.S @@ -39,13 +39,6 @@ SECTIONS .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } - 
/* - * Discard .note.gnu.property sections which are unused and have - * different alignment requirement from vDSO note sections. - */ - /DISCARD/ : { - *(.note.GNU-stack .note.gnu.property) - } .note : { *(.note.*) } :text :note . = ALIGN(16); @@ -66,6 +59,7 @@ SECTIONS PROVIDE(end = .); /DISCARD/ : { + *(.note.GNU-stack) *(.data .data.* .gnu.linkonce.d.* .sdata*) *(.bss .sbss .dynbss .dynsbss) } diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 2d705ab815e4..232f787a088a 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -18,7 +18,6 @@ #include #include -#include #include #include #include @@ -105,18 +104,12 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); */ int valid_phys_addr_range(phys_addr_t addr, size_t size) { - /* - * Check whether addr is covered by a memory region without the - * MEMBLOCK_NOMAP attribute, and whether that region covers the - * entire range. In theory, this could lead to false negatives - * if the range is covered by distinct but adjacent memory regions - * that only differ in other attributes. However, few of such - * attributes have been defined, and it is debatable whether it - * follows that /dev/mem read() calls should be able traverse - * such boundaries. - */ - return memblock_is_region_memory(addr, size) && - memblock_is_map_memory(addr); + if (addr < PHYS_OFFSET) + return 0; + if (addr + size > __pa(high_memory - 1) + 1) + return 0; + + return 1; } /* diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index e8b8590f553a..b5ecf01a1e8d 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1333,18 +1333,13 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) int __init arch_ioremap_pud_supported(void) { - /* - * Only 4k granule supports level 1 block mappings. - * SW table walks can't handle removal of intermediate entries. 
- */ - return IS_ENABLED(CONFIG_ARM64_4K_PAGES) && - !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS); + /* only 4k granule supports level 1 block mappings */ + return IS_ENABLED(CONFIG_ARM64_4K_PAGES); } int __init arch_ioremap_pmd_supported(void) { - /* See arch_ioremap_pud_supported() */ - return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS); + return 1; } int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 3e8ffc8871f7..b78688806652 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -285,8 +285,8 @@ ENTRY(__cpu_setup) cmp x9, #2 b.lt 1f #ifdef CONFIG_ARM64_ERRATUM_1024718 - /* Disable hardware DBM on Cortex-A55 all versions */ - cpu_midr_match MIDR_CORTEX_A55, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(0xf, 0xf), x1, x2, x3, x4 + /* Disable hardware DBM on Cortex-A55 r0p0, r0p1 & r1p0 */ + cpu_midr_match MIDR_CORTEX_A55, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, 0), x1, x2, x3, x4 cbnz x1, 1f #endif orr x10, x10, #TCR_HD // hardware Dirty flag update diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c index 3e33a9844d99..dc2d16ce8a0d 100644 --- a/arch/h8300/kernel/asm-offsets.c +++ b/arch/h8300/kernel/asm-offsets.c @@ -62,9 +62,6 @@ int main(void) OFFSET(TI_FLAGS, thread_info, flags); OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE, thread_info, preempt_count); -#ifdef CONFIG_PREEMPTION - DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); -#endif return 0; } diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S index b5c050fe23a5..5f268c1071b3 100644 --- a/arch/hexagon/kernel/vmlinux.lds.S +++ b/arch/hexagon/kernel/vmlinux.lds.S @@ -70,8 +70,13 @@ SECTIONS _end = .; + /DISCARD/ : { + EXIT_TEXT + EXIT_DATA + EXIT_CALL + } + STABS_DEBUG DWARF_DEBUG - DISCARDS } diff --git a/arch/hexagon/lib/io.c b/arch/hexagon/lib/io.c index e5dfed1cf151..885c9626d5e0 100644 --- a/arch/hexagon/lib/io.c +++ b/arch/hexagon/lib/io.c @@ 
-40,7 +40,6 @@ void __raw_readsw(const void __iomem *addr, void *data, int len) *dst++ = *src; } -EXPORT_SYMBOL(__raw_readsw); /* * __raw_writesw - read words a short at a time @@ -61,7 +60,6 @@ void __raw_writesw(void __iomem *addr, const void *data, int len) } -EXPORT_SYMBOL(__raw_writesw); /* Pretty sure len is pre-adjusted for the length of the access already */ void __raw_readsl(const void __iomem *addr, void *data, int len) @@ -77,7 +75,6 @@ void __raw_readsl(const void __iomem *addr, void *data, int len) } -EXPORT_SYMBOL(__raw_readsl); void __raw_writesl(void __iomem *addr, const void *data, int len) { @@ -92,4 +89,3 @@ void __raw_writesl(void __iomem *addr, const void *data, int len) } -EXPORT_SYMBOL(__raw_writesl); diff --git a/arch/ia64/Kconfig.debug b/arch/ia64/Kconfig.debug index ee6c7f75f479..de9d507ba0fd 100644 --- a/arch/ia64/Kconfig.debug +++ b/arch/ia64/Kconfig.debug @@ -41,7 +41,7 @@ config DISABLE_VHPT config IA64_DEBUG_CMPXCHG bool "Turn on compare-and-exchange bug checking (slow!)" - depends on DEBUG_KERNEL && PRINTK + depends on DEBUG_KERNEL help Selecting this option turns on bug checking for the IA-64 compare-and-exchange instructions. This is slow! Itaniums diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h index 9d3d4fb87a7a..845143990a1d 100644 --- a/arch/ia64/include/asm/ptrace.h +++ b/arch/ia64/include/asm/ptrace.h @@ -53,7 +53,8 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) { - return regs->r12; + /* FIXME: should this be bspstore + nr_dirty regs? */ + return regs->ar_bspstore; } static inline int is_syscall_success(struct pt_regs *regs) @@ -77,6 +78,11 @@ static inline long regs_return_value(struct pt_regs *regs) unsigned long __ip = instruction_pointer(regs); \ (__ip & ~3UL) + ((__ip & 3UL) << 2); \ }) +/* + * Why not default? Because user_stack_pointer() on ia64 gives register + * stack backing store instead... 
+ */ +#define current_user_stack_pointer() (current_pt_regs()->r12) /* given a pointer to a task_struct, return the user's pt_regs */ # define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1) diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h index ec909eec0b4c..1d0b875fec44 100644 --- a/arch/ia64/include/asm/syscall.h +++ b/arch/ia64/include/asm/syscall.h @@ -35,7 +35,7 @@ static inline void syscall_rollback(struct task_struct *task, static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { - return regs->r10 == -1 ? -regs->r8:0; + return regs->r10 == -1 ? regs->r8:0; } static inline long syscall_get_return_value(struct task_struct *task, diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index 249bdf1b4ea2..39d64e0df1de 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h @@ -251,16 +251,6 @@ __tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long addre tlb->end_addr = address + PAGE_SIZE; } -static inline void -tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address, - unsigned long size) -{ - if (tlb->start_addr > address) - tlb->start_addr = address; - if (tlb->end_addr < address + size) - tlb->end_addr = address + size; -} - #define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm) #define tlb_start_vma(tlb, vma) do { } while (0) diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 0d5b64ddcdd1..2889412e03eb 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1858,7 +1858,7 @@ ia64_mca_cpu_init(void *cpu_data) data = mca_bootmem(); first_time = 0; } else - data = (void *)__get_free_pages(GFP_ATOMIC, + data = (void *)__get_free_pages(GFP_KERNEL, get_order(sz)); if (!data) panic("Could not allocate MCA memory for cpu %d\n", diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index 3503d488e9b3..94f8bf777afa 100644 --- a/arch/ia64/kernel/mca_drv.c +++ 
b/arch/ia64/kernel/mca_drv.c @@ -343,7 +343,7 @@ init_record_index_pools(void) /* - 2 - */ sect_min_size = sal_log_sect_min_sizes[0]; - for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++) + for (i = 1; i < sizeof sal_log_sect_min_sizes/sizeof(size_t); i++) if (sect_min_size > sal_log_sect_min_sizes[i]) sect_min_size = sal_log_sect_min_sizes[i]; diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index a757b123ebaf..6f54d511cc50 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -2140,39 +2140,27 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data) { struct syscall_get_set_args *args = data; struct pt_regs *pt = args->regs; - unsigned long *krbs, cfm, ndirty, nlocals, nouts; + unsigned long *krbs, cfm, ndirty; int i, count; if (unw_unwind_to_user(info) < 0) return; - /* - * We get here via a few paths: - * - break instruction: cfm is shared with caller. - * syscall args are in out= regs, locals are non-empty. - * - epsinstruction: cfm is set by br.call - * locals don't exist. - * - * For both cases argguments are reachable in cfm.sof - cfm.sol. - * CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ] - */ cfm = pt->cr_ifs; - nlocals = (cfm >> 7) & 0x7f; /* aka sol */ - nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */ krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8; ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19)); count = 0; if (in_syscall(pt)) - count = min_t(int, args->n, nouts); + count = min_t(int, args->n, cfm & 0x7f); - /* Iterate over outs. 
*/ for (i = 0; i < count; i++) { - int j = ndirty + nlocals + i + args->i; if (args->rw) - *ia64_rse_skip_regs(krbs, j) = args->args[i]; + *ia64_rse_skip_regs(krbs, ndirty + i + args->i) = + args->args[i]; else - args->args[i] = *ia64_rse_skip_regs(krbs, j); + args->args[i] = *ia64_rse_skip_regs(krbs, + ndirty + i + args->i); } if (!args->rw) { diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 3b0c892953ab..878626805369 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -99,7 +99,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len, * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been * called yet. Note that node 0 will also count all non-existent cpus. */ -static int early_nr_cpus_node(int node) +static int __meminit early_nr_cpus_node(int node) { int cpu, n = 0; @@ -114,7 +114,7 @@ static int early_nr_cpus_node(int node) * compute_pernodesize - compute size of pernode data * @node: the node id. */ -static unsigned long compute_pernodesize(int node) +static unsigned long __meminit compute_pernodesize(int node) { unsigned long pernodesize = 0, cpus; @@ -411,7 +411,7 @@ static void __init reserve_pernode_space(void) } } -static void scatter_node_data(void) +static void __meminit scatter_node_data(void) { pg_data_t **dst; int node; diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine index 16a737b9bd66..61dc643c0b05 100644 --- a/arch/m68k/Kconfig.machine +++ b/arch/m68k/Kconfig.machine @@ -184,7 +184,6 @@ config INIT_LCD config MEMORY_RESERVE int "Memory reservation (MiB)" depends on (UCSIMM || UCDIMM) - default 0 help Reserve certain memory regions on 68x328 based boards. 
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c index 7d695fc7a2d0..a0985fd088d1 100644 --- a/arch/m68k/emu/nfeth.c +++ b/arch/m68k/emu/nfeth.c @@ -260,8 +260,8 @@ static void __exit nfeth_cleanup(void) for (i = 0; i < MAX_UNIT; i++) { if (nfeth_dev[i]) { - unregister_netdev(nfeth_dev[i]); - free_netdev(nfeth_dev[i]); + unregister_netdev(nfeth_dev[0]); + free_netdev(nfeth_dev[0]); } } free_irq(nfEtherIRQ, nfeth_interrupt); diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h index 2238232c360e..932faa35655b 100644 --- a/arch/m68k/include/asm/raw_io.h +++ b/arch/m68k/include/asm/raw_io.h @@ -30,21 +30,21 @@ extern void __iounmap(void *addr, unsigned long size); * two accesses to memory, which may be undesirable for some devices. */ #define in_8(addr) \ - ({ u8 __v = (*(__force volatile u8 *) (unsigned long)(addr)); __v; }) + ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; }) #define in_be16(addr) \ - ({ u16 __v = (*(__force volatile u16 *) (unsigned long)(addr)); __v; }) + ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; }) #define in_be32(addr) \ - ({ u32 __v = (*(__force volatile u32 *) (unsigned long)(addr)); __v; }) + ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; }) #define in_le16(addr) \ - ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (unsigned long)(addr)); __v; }) + ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; }) #define in_le32(addr) \ - ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (unsigned long)(addr)); __v; }) + ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; }) -#define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b)) -#define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w)) -#define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l)) -#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(w)) -#define 
out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(l)) +#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b)) +#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w)) +#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l)) +#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w)) +#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l)) #define raw_inb in_8 #define raw_inw in_be16 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index f00329d8210d..17aa92272f9b 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -264,9 +264,6 @@ config BCM63XX select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN select SYS_HAS_EARLY_PRINTK - select SYS_HAS_CPU_BMIPS32_3300 - select SYS_HAS_CPU_BMIPS4350 - select SYS_HAS_CPU_BMIPS4380 select SWAP_IO_SPACE select ARCH_REQUIRE_GPIOLIB select HAVE_CLK @@ -3052,7 +3049,6 @@ config MIPS32_N32 config BINFMT_ELF32 bool default y if MIPS32_O32 || MIPS32_N32 - select ELFCORE endmenu diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 283ee556fea5..0e4516d631a6 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -276,7 +276,7 @@ LDFLAGS += -m $(ld-emul) ifdef CONFIG_MIPS CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ - egrep -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \ + egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g') ifdef CONFIG_64BIT CHECKFLAGS += -m64 diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c index c05f7376148a..0fc53e08a894 100644 --- a/arch/mips/alchemy/board-xxs1500.c +++ b/arch/mips/alchemy/board-xxs1500.c @@ -30,7 +30,6 @@ #include #include #include -#include #include const char *get_system_type(void) diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig index 7ca7384fd5c9..e970fd9cf769 100644 --- 
a/arch/mips/bcm47xx/Kconfig +++ b/arch/mips/bcm47xx/Kconfig @@ -26,7 +26,6 @@ config BCM47XX_BCMA select BCMA select BCMA_HOST_SOC select BCMA_DRIVER_MIPS - select BCMA_DRIVER_PCI if PCI select BCMA_DRIVER_PCI_HOSTMODE if PCI select BCMA_DRIVER_GPIO default y diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c index fe90c1c86a60..637565284732 100644 --- a/arch/mips/bcm63xx/clk.c +++ b/arch/mips/bcm63xx/clk.c @@ -333,18 +333,6 @@ void clk_disable(struct clk *clk) EXPORT_SYMBOL(clk_disable); -struct clk *clk_get_parent(struct clk *clk) -{ - return NULL; -} -EXPORT_SYMBOL(clk_get_parent); - -int clk_set_parent(struct clk *clk, struct clk *parent) -{ - return 0; -} -EXPORT_SYMBOL(clk_set_parent); - unsigned long clk_get_rate(struct clk *clk) { return clk->rate; diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c index a1a54a3af03b..080cd53bac36 100644 --- a/arch/mips/boot/compressed/decompress.c +++ b/arch/mips/boot/compressed/decompress.c @@ -11,8 +11,6 @@ * option) any later version. 
*/ -#define DISABLE_BRANCH_PROFILING - #include #include #include diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h index 4747a4694669..982bc0685330 100644 --- a/arch/mips/include/asm/hugetlb.h +++ b/arch/mips/include/asm/hugetlb.h @@ -67,13 +67,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { - /* - * clear the huge pte entry firstly, so that the other smp threads will - * not get old pte entry after finishing flush_tlb_page and before - * setting new huge pte entry - */ - huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); - flush_tlb_page(vma, addr); + flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma))); } static inline int huge_pte_none(pte_t pte) diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h index bc414657601c..d92cf59bdae6 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h +++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h @@ -303,7 +303,7 @@ enum cvmx_chip_types_enum { /* Functions to return string based on type */ #define ENUM_BRD_TYPE_CASE(x) \ - case x: return (&#x[16]); /* Skip CVMX_BOARD_TYPE_ */ + case x: return(#x + 16); /* Skip CVMX_BOARD_TYPE_ */ static inline const char *cvmx_board_type_to_string(enum cvmx_board_types_enum type) { @@ -392,7 +392,7 @@ static inline const char *cvmx_board_type_to_string(enum } #define ENUM_CHIP_TYPE_CASE(x) \ - case x: return (&#x[15]); /* Skip CVMX_CHIP_TYPE */ + case x: return(#x + 15); /* Skip CVMX_CHIP_TYPE */ static inline const char *cvmx_chip_type_to_string(enum cvmx_chip_types_enum type) { diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 6ba3b4bca457..261b2ce579bb 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -91,7 +91,6 @@ SECTIONS INIT_TASK_DATA(THREAD_SIZE) NOSAVE_DATA - PAGE_ALIGNED_DATA(PAGE_SIZE) 
CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) DATA_DATA diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c index 80bdcb26ef8a..a0706fd4ce0a 100644 --- a/arch/mips/lantiq/clk.c +++ b/arch/mips/lantiq/clk.c @@ -165,12 +165,6 @@ struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) return NULL; } -int clk_set_parent(struct clk *clk, struct clk *parent) -{ - return 0; -} -EXPORT_SYMBOL(clk_set_parent); - static inline u32 get_counter_resolution(void) { u32 res; diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index 5526b89a21a0..a7057a06c096 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -245,7 +245,7 @@ static void ltq_hw_irqdispatch(int module) do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module)); /* if this is a EBU irq, we need to ack it or get a deadlock */ - if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0) + if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10, LTQ_EBU_PCC_ISTAT); } diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c index 35b7d1a0cad3..34a116e840d8 100644 --- a/arch/mips/lantiq/xway/dma.c +++ b/arch/mips/lantiq/xway/dma.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include @@ -40,7 +39,6 @@ #define LTQ_DMA_PCTRL 0x44 #define LTQ_DMA_IRNEN 0xf4 -#define DMA_ID_CHNR GENMASK(26, 20) /* channel number */ #define DMA_DESCPT BIT(3) /* descriptor complete irq */ #define DMA_TX BIT(8) /* TX channel direction */ #define DMA_CHAN_ON BIT(0) /* channel on / off bit */ @@ -51,6 +49,7 @@ #define DMA_POLL BIT(31) /* turn on channel polling */ #define DMA_CLK_DIV4 BIT(6) /* polling clock divider */ #define DMA_2W_BURST BIT(1) /* 2 word burst length */ +#define DMA_MAX_CHANNEL 20 /* the soc has 20 channels */ #define DMA_ETOP_ENDIANNESS (0xf << 8) /* endianness swap etop channels */ #define DMA_WEIGHT 
(BIT(17) | BIT(16)) /* default channel wheight */ @@ -217,7 +216,7 @@ ltq_dma_init(struct platform_device *pdev) { struct clk *clk; struct resource *res; - unsigned int id, nchannels; + unsigned id; int i; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -233,24 +232,21 @@ ltq_dma_init(struct platform_device *pdev) clk_enable(clk); ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL); - usleep_range(1, 10); - /* disable all interrupts */ ltq_dma_w32(0, LTQ_DMA_IRNEN); /* reset/configure each channel */ - id = ltq_dma_r32(LTQ_DMA_ID); - nchannels = ((id & DMA_ID_CHNR) >> 20); - for (i = 0; i < nchannels; i++) { + for (i = 0; i < DMA_MAX_CHANNEL; i++) { ltq_dma_w32(i, LTQ_DMA_CS); ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL); ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL); ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); } + id = ltq_dma_r32(LTQ_DMA_ID); dev_info(&pdev->dev, "Init done - hw rev: %X, ports: %d, channels: %d\n", - id & 0x1f, (id >> 16) & 0xf, nchannels); + id & 0x1f, (id >> 16) & 0xf, id >> 20); return 0; } diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c index fd50aa7b178a..272af8ac2425 100644 --- a/arch/mips/lib/mips-atomic.c +++ b/arch/mips/lib/mips-atomic.c @@ -37,7 +37,7 @@ */ notrace void arch_local_irq_disable(void) { - preempt_disable_notrace(); + preempt_disable(); __asm__ __volatile__( " .set push \n" @@ -53,7 +53,7 @@ notrace void arch_local_irq_disable(void) : /* no inputs */ : "memory"); - preempt_enable_notrace(); + preempt_enable(); } EXPORT_SYMBOL(arch_local_irq_disable); @@ -62,7 +62,7 @@ notrace unsigned long arch_local_irq_save(void) { unsigned long flags; - preempt_disable_notrace(); + preempt_disable(); __asm__ __volatile__( " .set push \n" @@ -79,7 +79,7 @@ notrace unsigned long arch_local_irq_save(void) : /* no inputs */ : "memory"); - preempt_enable_notrace(); + preempt_enable(); return flags; } @@ -89,7 +89,7 @@ notrace void arch_local_irq_restore(unsigned long flags) { unsigned long __tmp1; - 
preempt_disable_notrace(); + preempt_disable(); __asm__ __volatile__( " .set push \n" @@ -107,7 +107,7 @@ notrace void arch_local_irq_restore(unsigned long flags) : "0" (flags) : "memory"); - preempt_enable_notrace(); + preempt_enable(); } EXPORT_SYMBOL(arch_local_irq_restore); diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index a7256a7d7936..8116650e14ab 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1464,7 +1464,7 @@ static int probe_scache(void) return 1; } -static void loongson2_sc_init(void) +static void __init loongson2_sc_init(void) { struct cpuinfo_mips *c = ¤t_cpu_data; diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c index 5d2e857f96b5..e1dd1c1d3fde 100644 --- a/arch/mips/mti-malta/malta-platform.c +++ b/arch/mips/mti-malta/malta-platform.c @@ -52,8 +52,7 @@ static struct plat_serial8250_port uart8250_data[] = { .mapbase = 0x1f000900, /* The CBUS UART */ .irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2, .uartclk = 3686400, /* Twice the usual clk! */ - .iotype = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ? 
- UPIO_MEM32BE : UPIO_MEM32, + .iotype = UPIO_MEM32, .flags = CBUS_UART_FLAGS, .regshift = 3, }, diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index df2e7e3b2a5a..f9eda5d8f82c 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -10,7 +10,6 @@ #include #include -#include #include #include #include @@ -28,7 +27,6 @@ __iomem void *rt_sysc_membase; __iomem void *rt_memc_membase; -EXPORT_SYMBOL_GPL(rt_sysc_membase); __iomem void *plat_of_remap_node(const char *node) { diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c index d72dd0d2ff59..fb4b3520cdc6 100644 --- a/arch/mips/sni/time.c +++ b/arch/mips/sni/time.c @@ -18,14 +18,14 @@ static int a20r_set_periodic(struct clock_event_device *evt) { *(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0x34; wmb(); - *(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV & 0xff; + *(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV; wmb(); *(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV >> 8; wmb(); *(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0xb4; wmb(); - *(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV & 0xff; + *(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV; wmb(); *(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV >> 8; wmb(); diff --git a/arch/mips/vdso/vdso.h b/arch/mips/vdso/vdso.h index 921589b45bc2..cfb1be441dec 100644 --- a/arch/mips/vdso/vdso.h +++ b/arch/mips/vdso/vdso.h @@ -81,7 +81,7 @@ static inline const union mips_vdso_data *get_vdso_data(void) static inline void __iomem *get_gic(const union mips_vdso_data *data) { - return (void __iomem *)((unsigned long)data & PAGE_MASK) - PAGE_SIZE; + return (void __iomem *)data - PAGE_SIZE; } #endif /* CONFIG_CLKSRC_MIPS_GIC */ diff --git a/arch/nios2/include/asm/irqflags.h b/arch/nios2/include/asm/irqflags.h index 0338fcb88203..75ab92e639f8 100644 --- a/arch/nios2/include/asm/irqflags.h +++ b/arch/nios2/include/asm/irqflags.h @@ -22,7 +22,7 @@ static inline unsigned long 
arch_local_save_flags(void) { - return RDCTL(CTL_FSTATUS); + return RDCTL(CTL_STATUS); } /* @@ -31,7 +31,7 @@ static inline unsigned long arch_local_save_flags(void) */ static inline void arch_local_irq_restore(unsigned long flags) { - WRCTL(CTL_FSTATUS, flags); + WRCTL(CTL_STATUS, flags); } static inline void arch_local_irq_disable(void) diff --git a/arch/nios2/include/asm/registers.h b/arch/nios2/include/asm/registers.h index 33824f2ad1ab..615bce19b546 100644 --- a/arch/nios2/include/asm/registers.h +++ b/arch/nios2/include/asm/registers.h @@ -24,7 +24,7 @@ #endif /* control register numbers */ -#define CTL_FSTATUS 0 +#define CTL_STATUS 0 #define CTL_ESTATUS 1 #define CTL_BSTATUS 2 #define CTL_IENABLE 3 diff --git a/arch/nios2/platform/Kconfig.platform b/arch/nios2/platform/Kconfig.platform index 78ffc0bf4ebe..d3e5df9fb36b 100644 --- a/arch/nios2/platform/Kconfig.platform +++ b/arch/nios2/platform/Kconfig.platform @@ -37,7 +37,6 @@ config NIOS2_DTB_PHYS_ADDR config NIOS2_DTB_SOURCE_BOOL bool "Compile and link device tree into kernel image" - depends on !COMPILE_TEST default n help This allows you to specify a dts (device tree source) file diff --git a/arch/openrisc/include/asm/barrier.h b/arch/openrisc/include/asm/barrier.h deleted file mode 100644 index 7538294721be..000000000000 --- a/arch/openrisc/include/asm/barrier.h +++ /dev/null @@ -1,9 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_BARRIER_H -#define __ASM_BARRIER_H - -#define mb() asm volatile ("l.msync" ::: "memory") - -#include - -#endif /* __ASM_BARRIER_H */ diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index 92cdc1e56b60..3fbe420f49c4 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -491,7 +491,6 @@ EXCEPTION_ENTRY(_external_irq_handler) l.bnf 1f // ext irq enabled, all ok. 
l.nop -#ifdef CONFIG_PRINTK l.addi r1,r1,-0x8 l.movhi r3,hi(42f) l.ori r3,r3,lo(42f) @@ -505,7 +504,6 @@ EXCEPTION_ENTRY(_external_irq_handler) .string "\n\rESR interrupt bug: in _external_irq_handler (ESR %x)\n\r" .align 4 .previous -#endif l.ori r4,r4,SPR_SR_IEE // fix the bug // l.sw PT_SR(r1),r4 diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c index e5f5b69a7b7b..b4ed8b36e078 100644 --- a/arch/openrisc/kernel/setup.c +++ b/arch/openrisc/kernel/setup.c @@ -278,8 +278,6 @@ void calibrate_delay(void) pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n", loops_per_jiffy / (500000 / HZ), (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy); - - of_node_put(cpu); } void __init setup_arch(char **cmdline_p) diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index 088888fcf8df..80e742a1c162 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -174,7 +174,7 @@ extern int npmem_ranges; #include #include -#define PAGE0 ((struct zeropage *)absolute_pointer(__PAGE_OFFSET)) +#define PAGE0 ((struct zeropage *)__PAGE_OFFSET) /* DEFINITION OF THE ZERO-PAGE (PAG0) */ /* based on work by Jason Eckhardt (jason@equator.com) */ diff --git a/arch/parisc/install.sh b/arch/parisc/install.sh index a8c49815f58c..6f68784fea25 100644 --- a/arch/parisc/install.sh +++ b/arch/parisc/install.sh @@ -39,7 +39,6 @@ verify "$3" if [ -n "${INSTALLKERNEL}" ]; then if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi - if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi fi # Default install diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 86e79e9df265..3b7b022384a0 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -1849,8 +1849,8 @@ syscall_restore: LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* Are we being ptraced? 
*/ - LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 - ldi _TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2 + ldw TASK_FLAGS(%r1),%r19 + ldi _TIF_SYSCALL_TRACE_MASK,%r2 and,COND(=) %r19,%r2,%r0 b,n syscall_restore_rfi diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index d9a4d6ffc0a8..2264f68f3c2f 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -239,12 +239,6 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs, #endif usp = (regs->gr[30] & ~(0x01UL)); -#ifdef CONFIG_64BIT - if (is_compat_task()) { - /* The gcc alloca implementation leaves garbage in the upper 32 bits of sp */ - usp = (compat_uint_t)usp; - } -#endif /*FIXME: frame_size parameter is unused, remove it. */ frame = get_sigframe(&ksig->ka, usp, sizeof(*frame)); diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 5a2c4771e9d1..52e85973a283 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include @@ -75,10 +74,7 @@ enum ipi_message_type { IPI_CALL_FUNC, IPI_CPU_START, IPI_CPU_STOP, - IPI_CPU_TEST, -#ifdef CONFIG_KGDB - IPI_ENTER_KGDB, -#endif + IPI_CPU_TEST }; @@ -174,12 +170,7 @@ ipi_interrupt(int irq, void *dev_id) case IPI_CPU_TEST: smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu); break; -#ifdef CONFIG_KGDB - case IPI_ENTER_KGDB: - smp_debug(100, KERN_DEBUG "CPU%d ENTER_KGDB\n", this_cpu); - kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); - break; -#endif + default: printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n", this_cpu, which); @@ -235,12 +226,6 @@ send_IPI_allbutself(enum ipi_message_type op) } } -#ifdef CONFIG_KGDB -void kgdb_roundup_cpus(void) -{ - send_IPI_allbutself(IPI_ENTER_KGDB); -} -#endif inline void smp_send_stop(void) { send_IPI_allbutself(IPI_CPU_STOP); } diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 55e7ba06511d..6f61a17e2485 100644 --- a/arch/parisc/kernel/traps.c 
+++ b/arch/parisc/kernel/traps.c @@ -796,7 +796,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) * unless pagefault_disable() was called before. */ - if (faulthandler_disabled() || fault_space == 0) + if (fault_space == 0 && !faulthandler_disabled()) { /* Clean up and return if in exception table. */ if (fixup_exception(regs)) diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index cbfbba4bbee6..8d49614d600d 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -354,7 +354,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop) : "r" (val), "r" (regs->ior), "r" (regs->isr) : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER ); - return ret; + return 0; } static int emulate_std(struct pt_regs *regs, int frreg, int flop) { @@ -411,7 +411,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) __asm__ __volatile__ ( " mtsp %4, %%sr1\n" " zdep %2, 29, 2, %%r19\n" -" dep %%r0, 31, 2, %3\n" +" dep %%r0, 31, 2, %2\n" " mtsar %%r19\n" " zvdepi -2, 32, %%r19\n" "1: ldw 0(%%sr1,%3),%%r20\n" @@ -423,7 +423,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) " andcm %%r21, %%r19, %%r21\n" " or %1, %%r20, %1\n" " or %2, %%r21, %2\n" -"3: stw %1,0(%%sr1,%3)\n" +"3: stw %1,0(%%sr1,%1)\n" "4: stw %%r1,4(%%sr1,%3)\n" "5: stw %2,8(%%sr1,%3)\n" " copy %%r0, %0\n" @@ -611,6 +611,7 @@ void handle_unaligned(struct pt_regs *regs) ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. 
*/ break; } +#ifdef CONFIG_PA20 switch (regs->iir & OPCODE2_MASK) { case OPCODE_FLDD_L: @@ -621,23 +622,22 @@ void handle_unaligned(struct pt_regs *regs) flop=1; ret = emulate_std(regs, R2(regs->iir),1); break; -#ifdef CONFIG_PA20 case OPCODE_LDD_L: ret = emulate_ldd(regs, R2(regs->iir),0); break; case OPCODE_STD_L: ret = emulate_std(regs, R2(regs->iir),0); break; -#endif } +#endif switch (regs->iir & OPCODE3_MASK) { case OPCODE_FLDW_L: flop=1; - ret = emulate_ldw(regs, R2(regs->iir), 1); + ret = emulate_ldw(regs, R2(regs->iir),0); break; case OPCODE_LDW_M: - ret = emulate_ldw(regs, R2(regs->iir), 0); + ret = emulate_ldw(regs, R2(regs->iir),1); break; case OPCODE_FSTW_L: diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index ad6545dafe03..d72f00310683 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -940,9 +940,9 @@ void flush_tlb_all(void) { int do_recycle; + __inc_irq_stat(irq_tlb_count); do_recycle = 0; spin_lock(&sid_lock); - __inc_irq_stat(irq_tlb_count); if (dirty_space_ids > RECYCLE_THRESHOLD) { BUG_ON(recycle_inuse); /* FIXME: Use a semaphore/wait queue here */ get_dirty_sids(&recycle_ndirty,recycle_dirty_array); @@ -961,8 +961,8 @@ void flush_tlb_all(void) #else void flush_tlb_all(void) { - spin_lock(&sid_lock); __inc_irq_stat(irq_tlb_count); + spin_lock(&sid_lock); flush_tlb_all_local(NULL); recycle_sids(); spin_unlock(&sid_lock); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 356a48c18dbd..ada247b339b5 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -579,7 +579,7 @@ config PPC_64K_PAGES config PPC_256K_PAGES bool "256k page size" - depends on 44x && !STDBINUTILS && !PPC_47x + depends on 44x && !STDBINUTILS help Make the page size 256k. 
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 7e62572215ce..3a510f4a6b68 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -350,7 +350,6 @@ config STRICT_DEVMEM config FAIL_IOMMU bool "Fault-injection capability for IOMMU" depends on FAULT_INJECTION - depends on PCI || IBMVIO help Provide fault-injection capability for IOMMU. Each device can be selectively enabled via the fail_iommu property. diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S index 51669cdbf011..a3550e8f1a77 100644 --- a/arch/powerpc/boot/crt0.S +++ b/arch/powerpc/boot/crt0.S @@ -49,6 +49,9 @@ p_end: .long _end p_pstack: .long _platform_stack_top #endif + .globl _zimage_start + /* Clang appears to require the .weak directive to be after the symbol + * is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921 */ .weak _zimage_start _zimage_start: .globl _zimage_start_lib diff --git a/arch/powerpc/boot/devtree.c b/arch/powerpc/boot/devtree.c index 27c84b82b588..a7e21a35c03a 100644 --- a/arch/powerpc/boot/devtree.c +++ b/arch/powerpc/boot/devtree.c @@ -17,7 +17,6 @@ #include "string.h" #include "stdio.h" #include "ops.h" -#include "of.h" void dt_fixup_memory(u64 start, u64 size) { @@ -28,25 +27,21 @@ void dt_fixup_memory(u64 start, u64 size) root = finddevice("/"); if (getprop(root, "#address-cells", &naddr, sizeof(naddr)) < 0) naddr = 2; - else - naddr = be32_to_cpu(naddr); if (naddr < 1 || naddr > 2) fatal("Can't cope with #address-cells == %d in /\n\r", naddr); if (getprop(root, "#size-cells", &nsize, sizeof(nsize)) < 0) nsize = 1; - else - nsize = be32_to_cpu(nsize); if (nsize < 1 || nsize > 2) fatal("Can't cope with #size-cells == %d in /\n\r", nsize); i = 0; if (naddr == 2) - memreg[i++] = cpu_to_be32(start >> 32); - memreg[i++] = cpu_to_be32(start & 0xffffffff); + memreg[i++] = start >> 32; + memreg[i++] = start & 0xffffffff; if (nsize == 2) - memreg[i++] = cpu_to_be32(size >> 32); - memreg[i++] = cpu_to_be32(size & 
0xffffffff); + memreg[i++] = size >> 32; + memreg[i++] = size & 0xffffffff; memory = finddevice("/memory"); if (! memory) { @@ -54,9 +49,9 @@ void dt_fixup_memory(u64 start, u64 size) setprop_str(memory, "device_type", "memory"); } - printf("Memory <- <0x%x", be32_to_cpu(memreg[0])); + printf("Memory <- <0x%x", memreg[0]); for (i = 1; i < (naddr + nsize); i++) - printf(" 0x%x", be32_to_cpu(memreg[i])); + printf(" 0x%x", memreg[i]); printf("> (%ldMB)\n\r", (unsigned long)(size >> 20)); setprop(memory, "reg", memreg, (naddr + nsize)*sizeof(u32)); @@ -74,10 +69,10 @@ void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus) printf("CPU bus-frequency <- 0x%x (%dMHz)\n\r", bus, MHZ(bus)); while ((devp = find_node_by_devtype(devp, "cpu"))) { - setprop_val(devp, "clock-frequency", cpu_to_be32(cpu)); - setprop_val(devp, "timebase-frequency", cpu_to_be32(tb)); + setprop_val(devp, "clock-frequency", cpu); + setprop_val(devp, "timebase-frequency", tb); if (bus > 0) - setprop_val(devp, "bus-frequency", cpu_to_be32(bus)); + setprop_val(devp, "bus-frequency", bus); } timebase_period_ns = 1000000000 / tb; @@ -89,7 +84,7 @@ void dt_fixup_clock(const char *path, u32 freq) if (devp) { printf("%s: clock-frequency <- %x (%dMHz)\n\r", path, freq, MHZ(freq)); - setprop_val(devp, "clock-frequency", cpu_to_be32(freq)); + setprop_val(devp, "clock-frequency", freq); } } @@ -142,12 +137,8 @@ void dt_get_reg_format(void *node, u32 *naddr, u32 *nsize) { if (getprop(node, "#address-cells", naddr, 4) != 4) *naddr = 2; - else - *naddr = be32_to_cpu(*naddr); if (getprop(node, "#size-cells", nsize, 4) != 4) *nsize = 1; - else - *nsize = be32_to_cpu(*nsize); } static void copy_val(u32 *dest, u32 *src, int naddr) @@ -176,9 +167,9 @@ static int add_reg(u32 *reg, u32 *add, int naddr) int i, carry = 0; for (i = MAX_ADDR_CELLS - 1; i >= MAX_ADDR_CELLS - naddr; i--) { - u64 tmp = (u64)be32_to_cpu(reg[i]) + be32_to_cpu(add[i]) + carry; + u64 tmp = (u64)reg[i] + add[i] + carry; carry = tmp >> 32; - reg[i] = 
cpu_to_be32((u32)tmp); + reg[i] = (u32)tmp; } return !carry; @@ -193,18 +184,18 @@ static int compare_reg(u32 *reg, u32 *range, u32 *rangesize) u32 end; for (i = 0; i < MAX_ADDR_CELLS; i++) { - if (be32_to_cpu(reg[i]) < be32_to_cpu(range[i])) + if (reg[i] < range[i]) return 0; - if (be32_to_cpu(reg[i]) > be32_to_cpu(range[i])) + if (reg[i] > range[i]) break; } for (i = 0; i < MAX_ADDR_CELLS; i++) { - end = be32_to_cpu(range[i]) + be32_to_cpu(rangesize[i]); + end = range[i] + rangesize[i]; - if (be32_to_cpu(reg[i]) < end) + if (reg[i] < end) break; - if (be32_to_cpu(reg[i]) > end) + if (reg[i] > end) return 0; } @@ -253,6 +244,7 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr, return 0; dt_get_reg_format(parent, &naddr, &nsize); + if (nsize > 2) return 0; @@ -264,10 +256,10 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr, copy_val(last_addr, prop_buf + offset, naddr); - ret_size = be32_to_cpu(prop_buf[offset + naddr]); + ret_size = prop_buf[offset + naddr]; if (nsize == 2) { ret_size <<= 32; - ret_size |= be32_to_cpu(prop_buf[offset + naddr + 1]); + ret_size |= prop_buf[offset + naddr + 1]; } for (;;) { @@ -290,6 +282,7 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr, offset = find_range(last_addr, prop_buf, prev_naddr, naddr, prev_nsize, buflen / 4); + if (offset < 0) return 0; @@ -307,7 +300,8 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr, if (naddr > 2) return 0; - ret_addr = ((u64)be32_to_cpu(last_addr[2]) << 32) | be32_to_cpu(last_addr[3]); + ret_addr = ((u64)last_addr[2] << 32) | last_addr[3]; + if (sizeof(void *) == 4 && (ret_addr >= 0x100000000ULL || ret_size > 0x100000000ULL || ret_addr + ret_size > 0x100000000ULL)) @@ -360,14 +354,11 @@ int dt_is_compatible(void *node, const char *compat) int dt_get_virtual_reg(void *node, void **addr, int nres) { unsigned long xaddr; - int n, i; + int n; n = getprop(node, "virtual-reg", addr, nres * 4); - if 
(n > 0) { - for (i = 0; i < n/4; i ++) - ((u32 *)addr)[i] = be32_to_cpu(((u32 *)addr)[i]); + if (n > 0) return n / 4; - } for (n = 0; n < nres; n++) { if (!dt_xlate_reg(node, n, &xaddr, NULL)) diff --git a/arch/powerpc/boot/dts/charon.dts b/arch/powerpc/boot/dts/charon.dts index 1c8fe20752e6..0e00e508eaa6 100644 --- a/arch/powerpc/boot/dts/charon.dts +++ b/arch/powerpc/boot/dts/charon.dts @@ -39,7 +39,7 @@ }; }; - memory@0 { + memory { device_type = "memory"; reg = <0x00000000 0x08000000>; // 128MB }; diff --git a/arch/powerpc/boot/dts/digsy_mtc.dts b/arch/powerpc/boot/dts/digsy_mtc.dts index bf511255f3ae..955bff629df3 100644 --- a/arch/powerpc/boot/dts/digsy_mtc.dts +++ b/arch/powerpc/boot/dts/digsy_mtc.dts @@ -20,7 +20,7 @@ model = "intercontrol,digsy-mtc"; compatible = "intercontrol,digsy-mtc"; - memory@0 { + memory { reg = <0x00000000 0x02000000>; // 32MB }; diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi index 404f570ebe23..af12ead88c5f 100644 --- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi @@ -122,15 +122,7 @@ }; /include/ "pq3-i2c-0.dtsi" - i2c@3000 { - fsl,i2c-erratum-a004447; - }; - /include/ "pq3-i2c-1.dtsi" - i2c@3100 { - fsl,i2c-erratum-a004447; - }; - /include/ "pq3-duart-0.dtsi" /include/ "pq3-espi-0.dtsi" spi0: spi@7000 { diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi index 8921f17fca42..51e975d7631a 100644 --- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi @@ -389,23 +389,7 @@ }; /include/ "qoriq-i2c-0.dtsi" - i2c@118000 { - fsl,i2c-erratum-a004447; - }; - - i2c@118100 { - fsl,i2c-erratum-a004447; - }; - /include/ "qoriq-i2c-1.dtsi" - i2c@119000 { - fsl,i2c-erratum-a004447; - }; - - i2c@119100 { - fsl,i2c-erratum-a004447; - }; - /include/ "qoriq-duart-0.dtsi" /include/ "qoriq-duart-1.dtsi" /include/ "qoriq-gpio-0.dtsi" diff --git 
a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi index 39b1c1fa0c81..7f60b6060176 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi @@ -78,7 +78,6 @@ fman0: fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfc000 0x1000>; - fsl,erratum-a009885; }; xmdio0: mdio@fd000 { @@ -86,7 +85,6 @@ fman0: fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfd000 0x1000>; - fsl,erratum-a009885; }; ptp_timer0: ptp-timer@fe000 { diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts index 18d137a3393f..179a1785d645 100644 --- a/arch/powerpc/boot/dts/lite5200.dts +++ b/arch/powerpc/boot/dts/lite5200.dts @@ -36,7 +36,7 @@ }; }; - memory@0 { + memory { device_type = "memory"; reg = <0x00000000 0x04000000>; // 64MB }; diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts index 29419cf81e04..5abb46c5cc95 100644 --- a/arch/powerpc/boot/dts/lite5200b.dts +++ b/arch/powerpc/boot/dts/lite5200b.dts @@ -35,7 +35,7 @@ led4 { gpios = <&gpio_simple 2 1>; }; }; - memory@0 { + memory { reg = <0x00000000 0x10000000>; // 256MB }; diff --git a/arch/powerpc/boot/dts/media5200.dts b/arch/powerpc/boot/dts/media5200.dts index 3d57463bc49d..b5413cb85f13 100644 --- a/arch/powerpc/boot/dts/media5200.dts +++ b/arch/powerpc/boot/dts/media5200.dts @@ -36,7 +36,7 @@ }; }; - memory@0 { + memory { reg = <0x00000000 0x08000000>; // 128MB RAM }; diff --git a/arch/powerpc/boot/dts/mpc5200b.dtsi b/arch/powerpc/boot/dts/mpc5200b.dtsi index ecfba675b561..969b2200b2f9 100644 --- a/arch/powerpc/boot/dts/mpc5200b.dtsi +++ b/arch/powerpc/boot/dts/mpc5200b.dtsi @@ -37,7 +37,7 @@ }; }; - memory: memory@0 { + memory: memory { device_type = "memory"; reg = <0x00000000 0x04000000>; // 64MB }; diff --git a/arch/powerpc/boot/dts/o2d.dts b/arch/powerpc/boot/dts/o2d.dts 
index 5a676e8141ca..9f6dd4d889b3 100644 --- a/arch/powerpc/boot/dts/o2d.dts +++ b/arch/powerpc/boot/dts/o2d.dts @@ -16,7 +16,7 @@ model = "ifm,o2d"; compatible = "ifm,o2d"; - memory@0 { + memory { reg = <0x00000000 0x08000000>; // 128MB }; diff --git a/arch/powerpc/boot/dts/o2d.dtsi b/arch/powerpc/boot/dts/o2d.dtsi index 1b4df5f64b58..cf073e693f24 100644 --- a/arch/powerpc/boot/dts/o2d.dtsi +++ b/arch/powerpc/boot/dts/o2d.dtsi @@ -23,7 +23,7 @@ model = "ifm,o2d"; compatible = "ifm,o2d"; - memory@0 { + memory { reg = <0x00000000 0x04000000>; // 64MB }; diff --git a/arch/powerpc/boot/dts/o2dnt2.dts b/arch/powerpc/boot/dts/o2dnt2.dts index 5184c461a205..a0f5b97a4f06 100644 --- a/arch/powerpc/boot/dts/o2dnt2.dts +++ b/arch/powerpc/boot/dts/o2dnt2.dts @@ -16,7 +16,7 @@ model = "ifm,o2dnt2"; compatible = "ifm,o2d"; - memory@0 { + memory { reg = <0x00000000 0x08000000>; // 128MB }; diff --git a/arch/powerpc/boot/dts/o3dnt.dts b/arch/powerpc/boot/dts/o3dnt.dts index 045b90171924..acce49326491 100644 --- a/arch/powerpc/boot/dts/o3dnt.dts +++ b/arch/powerpc/boot/dts/o3dnt.dts @@ -16,7 +16,7 @@ model = "ifm,o3dnt"; compatible = "ifm,o2d"; - memory@0 { + memory { reg = <0x00000000 0x04000000>; // 64MB }; diff --git a/arch/powerpc/boot/dts/pcm032.dts b/arch/powerpc/boot/dts/pcm032.dts index ac3f53c1a1f5..96b139bf50e9 100644 --- a/arch/powerpc/boot/dts/pcm032.dts +++ b/arch/powerpc/boot/dts/pcm032.dts @@ -26,7 +26,7 @@ model = "phytec,pcm032"; compatible = "phytec,pcm032"; - memory@0 { + memory { reg = <0x00000000 0x08000000>; // 128MB }; diff --git a/arch/powerpc/boot/dts/tqm5200.dts b/arch/powerpc/boot/dts/tqm5200.dts index 68b9e8240fb5..1db07f6cf133 100644 --- a/arch/powerpc/boot/dts/tqm5200.dts +++ b/arch/powerpc/boot/dts/tqm5200.dts @@ -36,7 +36,7 @@ }; }; - memory@0 { + memory { device_type = "memory"; reg = <0x00000000 0x04000000>; // 64MB }; diff --git a/arch/powerpc/boot/ns16550.c b/arch/powerpc/boot/ns16550.c index cea34a20085c..8c9ead94be06 100644 --- 
a/arch/powerpc/boot/ns16550.c +++ b/arch/powerpc/boot/ns16550.c @@ -14,7 +14,6 @@ #include "stdio.h" #include "io.h" #include "ops.h" -#include "of.h" #define UART_DLL 0 /* Out: Divisor Latch Low */ #define UART_DLM 1 /* Out: Divisor Latch High */ @@ -58,20 +57,16 @@ int ns16550_console_init(void *devp, struct serial_console_data *scdp) int n; u32 reg_offset; - if (dt_get_virtual_reg(devp, (void **)®_base, 1) < 1) { - printf("virt reg parse fail...\r\n"); + if (dt_get_virtual_reg(devp, (void **)®_base, 1) < 1) return -1; - } n = getprop(devp, "reg-offset", ®_offset, sizeof(reg_offset)); if (n == sizeof(reg_offset)) - reg_base += be32_to_cpu(reg_offset); + reg_base += reg_offset; n = getprop(devp, "reg-shift", ®_shift, sizeof(reg_shift)); if (n != sizeof(reg_shift)) reg_shift = 0; - else - reg_shift = be32_to_cpu(reg_shift); scdp->open = ns16550_open; scdp->putc = ns16550_putc; diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index 4dc7c8f9d9e1..e7cb72cdb2ba 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -43,8 +43,6 @@ # define SMPWMB eieio #endif -/* clang defines this macro for a builtin, which will not work with runtime patching */ -#undef __lwsync #define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") #define dma_rmb() __lwsync() #define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index 9c7eb907b165..a734b4b34d26 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -45,7 +45,7 @@ void __patch_exception(int exc, unsigned long addr); #endif #define OP_RT_RA_MASK 0xffff0000UL -#define LIS_R2 0x3c400000UL +#define LIS_R2 0x3c020000UL #define ADDIS_R2_R12 0x3c4c0000UL #define ADDI_R2_R2 0x38420000UL diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 
9fe3f05000e3..b118072670fb 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -400,6 +400,7 @@ enum { CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \ CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT) +#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) /* 64-bit CPUs */ #define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ @@ -478,6 +479,8 @@ enum { CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX | CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 | CPU_FTRS_CLASSIC32 | +#else + CPU_FTRS_GENERIC_32 | #endif #ifdef CONFIG_8xx CPU_FTRS_8XX | @@ -527,6 +530,8 @@ enum { CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX & CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 & CPU_FTRS_CLASSIC32 & +#else + CPU_FTRS_GENERIC_32 & #endif #ifdef CONFIG_8xx CPU_FTRS_8XX & diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h index 4494d5e1932f..4efc11dacb98 100644 --- a/arch/powerpc/include/asm/dcr-native.h +++ b/arch/powerpc/include/asm/dcr-native.h @@ -64,8 +64,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val) #define mfdcr(rn) \ ({unsigned int rval; \ if (__builtin_constant_p(rn) && rn < 1024) \ - asm volatile("mfdcr %0, %1" : "=r" (rval) \ - : "n" (rn)); \ + asm volatile("mfdcr %0," __stringify(rn) \ + : "=r" (rval)); \ else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \ rval = mfdcrx(rn); \ else \ @@ -75,8 +75,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val) #define mtdcr(rn, v) \ do { \ if (__builtin_constant_p(rn) && rn < 1024) \ - asm volatile("mtdcr %0, %1" \ - : : "n" (rn), "r" (v)); \ + asm volatile("mtdcr " __stringify(rn) ",%0" \ + : : "r" (v)); \ else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \ mtdcrx(rn, v); \ else \ diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 408f9e1fa24a..b64b4212b71f 100644 --- 
a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -149,9 +149,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, flush_hash_entry(mm, ptep, addr); #endif __asm__ __volatile__("\ - stw%X0 %2,%0\n\ + stw%U0%X0 %2,%0\n\ eieio\n\ - stw%X1 %L2,%1" + stw%U0%X0 %L2,%1" : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) : "r" (pte) : "memory"); diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index 2d729b53a556..a1bc7e758422 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -83,7 +83,6 @@ struct ps3_dma_region_ops; * @bus_addr: The 'translated' bus address of the region. * @len: The length in bytes of the region. * @offset: The offset from the start of memory of the region. - * @dma_mask: Device dma_mask. * @ioid: The IOID of the device who owns this region * @chunk_list: Opaque variable used by the ioc page manager. * @region_ops: struct ps3_dma_region_ops - dma region operations @@ -98,7 +97,6 @@ struct ps3_dma_region { enum ps3_dma_region_type region_type; unsigned long len; unsigned long offset; - u64 dma_mask; /* driver variables (set by ps3_dma_region_create) */ unsigned long bus_addr; diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h index 5e8f42ff797f..e8b6b5f7de7c 100644 --- a/arch/powerpc/include/uapi/asm/errno.h +++ b/arch/powerpc/include/uapi/asm/errno.h @@ -1,7 +1,6 @@ #ifndef _ASM_POWERPC_ERRNO_H #define _ASM_POWERPC_ERRNO_H -#undef EDEADLOCK #include #undef EDEADLOCK diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 8d05ef26dea9..41c011cb6070 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -257,10 +257,8 @@ int __init btext_find_display(int allow_nonstdout) rc = btext_initialize(np); printk("result: %d\n", rc); } - if (rc == 0) { - of_node_put(np); + if (rc == 0) break; - } } return rc; } diff --git a/arch/powerpc/kernel/eeh.c 
b/arch/powerpc/kernel/eeh.c index 3c7fe1158020..16193d7b0635 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -367,11 +367,14 @@ static inline unsigned long eeh_token_to_phys(unsigned long token) pa = pte_pfn(*ptep); /* On radix we can do hugepage mappings for io, so handle that */ - if (!hugepage_shift) - hugepage_shift = PAGE_SHIFT; + if (hugepage_shift) { + pa <<= hugepage_shift; + pa |= token & ((1ul << hugepage_shift) - 1); + } else { + pa <<= PAGE_SHIFT; + pa |= token & (PAGE_SIZE - 1); + } - pa <<= PAGE_SHIFT; - pa |= token & ((1ul << hugepage_shift) - 1); return pa; } diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 282ad1930593..4c9b5970af37 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -1019,7 +1019,7 @@ int iommu_take_ownership(struct iommu_table *tbl) spin_lock_irqsave(&tbl->large_pool.lock, flags); for (i = 0; i < tbl->nr_pools; i++) - spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); + spin_lock(&tbl->pools[i].lock); if (tbl->it_offset == 0) clear_bit(0, tbl->it_map); @@ -1048,7 +1048,7 @@ void iommu_release_ownership(struct iommu_table *tbl) spin_lock_irqsave(&tbl->large_pool.lock, flags); for (i = 0; i < tbl->nr_pools; i++) - spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); + spin_lock(&tbl->pools[i].lock); memset(tbl->it_map, 0, sz); diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 0a96b1941397..08b7a40de5f8 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -662,7 +662,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, /* * If found, replace it with: * addis r2, r12, (.TOC.-func)@ha - * addi r2, r2, (.TOC.-func)@l + * addi r2, r12, (.TOC.-func)@l */ ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value); ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 77690c7f2671..04a27307a2c4 100644 
--- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -258,7 +258,7 @@ static struct feature_property { }; #if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU) -static __init void identical_pvr_fixup(unsigned long node) +static inline void identical_pvr_fixup(unsigned long node) { unsigned int pvr; const char *model = of_get_flat_dt_prop(node, "model", NULL); diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 204e44cc896e..3139533640fc 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2526,7 +2526,7 @@ static void __init fixup_device_tree_efika_add_phy(void) /* Check if the phy-handle property exists - bail if it does */ rv = prom_getprop(node, "phy-handle", prop, sizeof(prop)); - if (rv <= 0) + if (!rv) return; /* diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 19ba60ab1807..ec9ec2058d2d 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -732,12 +732,10 @@ void start_secondary(void *unused) BUG(); } -#ifdef CONFIG_PROFILING int setup_profiling_timer(unsigned int multiplier) { return 0; } -#endif #ifdef CONFIG_SCHED_SMT /* cpumask of CPUs with asymetric SMT dependancy */ diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index 308744830f55..b1b2273d1f6d 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c @@ -230,17 +230,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) * value so we can restore it on the way out. */ orig_rets = args.rets; - if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) { - /* - * Don't overflow our args array: ensure there is room for - * at least rets[0] (even if the call specifies 0 nret). - * - * Each handler must then check for the correct nargs and nret - * values, but they may always return failure in rets[0]. 
- */ - rc = -EINVAL; - goto fail; - } args.rets = &args.args[be32_to_cpu(args.nargs)]; mutex_lock(&vcpu->kvm->arch.rtas_token_lock); @@ -268,17 +257,9 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) fail: /* * We only get here if the guest has called RTAS with a bogus - * args pointer or nargs/nret values that would overflow the - * array. That means we can't get to the args, and so we can't - * fail the RTAS call. So fail right out to userspace, which - * should kill the guest. - * - * SLOF should actually pass the hcall return value from the - * rtas handler call in r3, so enter_rtas could be modified to - * return a failure indication in r3 and we could return such - * errors to the guest rather than failing to host userspace. - * However old guests that don't test for failure could then - * continue silently after errors, so for now we won't do this. + * args pointer. That means we can't get to the args, and so we + * can't fail the RTAS call. So fail right out to userspace, + * which should kill the guest. */ return rc; } diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index b5dc2a03ea93..40b134bf5a68 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -280,9 +279,8 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types) : "unknown"); } -static int __do_entry_flush_fixups(void *data) +void do_entry_flush_fixups(enum l1d_flush_type types) { - enum l1d_flush_type types = *(enum l1d_flush_type *)data; unsigned int instrs[3], *dest; long *start, *end; int i; @@ -333,19 +331,6 @@ static int __do_entry_flush_fixups(void *data) : "ori type" : (types & L1D_FLUSH_MTTRIG) ? "mttrig type" : "unknown"); - - return 0; -} - -void do_entry_flush_fixups(enum l1d_flush_type types) -{ - /* - * The call to the fallback flush can not be safely patched in/out while - * other CPUs are executing it. 
So call __do_entry_flush_fixups() on one - * CPU while all other CPUs spin in the stop machine core with interrupts - * hard disabled. - */ - stop_machine(__do_entry_flush_fixups, &types, NULL); } void do_rfi_flush_fixups(enum l1d_flush_type types) diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c index 4b81fd96aa3e..c7f8e9586316 100644 --- a/arch/powerpc/lib/ppc_ksyms.c +++ b/arch/powerpc/lib/ppc_ksyms.c @@ -24,6 +24,7 @@ EXPORT_SYMBOL(csum_tcpudp_magic); #endif EXPORT_SYMBOL(__copy_tofrom_user); +EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(copy_page); #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 7a80e1cff6e2..30e2e8efbe6b 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2008,17 +2008,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, left += period; if (left <= 0) left = period; - - /* - * If address is not requested in the sample via - * PERF_SAMPLE_IP, just record that sample irrespective - * of SIAR valid check. - */ - if (event->attr.sample_type & PERF_SAMPLE_IP) - record = siar_valid(regs); - else - record = 1; - + record = siar_valid(regs); event->hw.last_period = event->hw.sample_period; } if (left < 0x80000000LL) @@ -2030,17 +2020,6 @@ static void record_and_restart(struct perf_event *event, unsigned long val, local64_set(&event->hw.period_left, left); perf_event_update_userpage(event); - /* - * Due to hardware limitation, sometimes SIAR could sample a kernel - * address even when freeze on supervisor state (kernel) is set in - * MMCR2. Check attr.exclude_kernel and address to drop the sample in - * these cases. - */ - if (event->attr.exclude_kernel && - (event->attr.sample_type & PERF_SAMPLE_IP) && - is_kernel_addr(mfspr(SPRN_SIAR))) - record = 0; - /* * Finally record data if requested. 
*/ diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S index 5f44e9223413..08ab6fefcf7a 100644 --- a/arch/powerpc/platforms/52xx/lite5200_sleep.S +++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S @@ -180,7 +180,7 @@ sram_code: udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */ mullw r12, r12, r11 mftb r13 /* start */ - add r12, r13, r12 /* end */ + addi r12, r13, r12 /* end */ 1: mftb r13 /* current */ cmp cr0, r13, r12 diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 4edceff5791a..14a582b21274 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -1107,7 +1107,6 @@ static int __init cell_iommu_fixed_mapping_init(void) if (hbase < dbase || (hend > (dbase + dsize))) { pr_debug("iommu: hash window doesn't fit in" "real DMA window\n"); - of_node_put(np); return -1; } } diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index b3bcdce89c3b..9485f1024d46 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -220,7 +220,6 @@ void hlwd_pic_probe(void) irq_set_chained_handler(cascade_virq, hlwd_pic_irq_cascade); hlwd_irq_host = host; - of_node_put(np); break; } } diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index d28c4a9269c3..e4169d68cb32 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -401,7 +401,6 @@ void opal_lpc_init(void) if (!of_get_property(np, "primary", NULL)) continue; opal_lpc_chip_id = of_get_ibm_chip_id(np); - of_node_put(np); break; } if (opal_lpc_chip_id < 0) diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index 76cbf1be9962..19bae78b1f25 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ b/arch/powerpc/platforms/ps3/mm.c @@ -18,7 +18,6 @@ * Foundation, 
Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include #include #include #include @@ -1133,7 +1132,6 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev, enum ps3_dma_region_type region_type, void *addr, unsigned long len) { unsigned long lpar_addr; - int result; lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0; @@ -1145,16 +1143,6 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev, r->offset -= map.r1.offset; r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size); - dev->core.dma_mask = &r->dma_mask; - - result = dma_set_mask_and_coherent(&dev->core, DMA_BIT_MASK(32)); - - if (result < 0) { - dev_err(&dev->core, "%s:%d: dma_set_mask_and_coherent failed: %d\n", - __func__, __LINE__, result); - return result; - } - switch (dev->dev_type) { case PS3_DEVICE_TYPE_SB: r->region_ops = (USE_DYNAMIC_DMA) diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 91a667d8b1e9..551ba5b35df9 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -131,6 +131,7 @@ void dlpar_free_cc_nodes(struct device_node *dn) #define NEXT_PROPERTY 3 #define PREV_PARENT 4 #define MORE_MEMORY 5 +#define CALL_AGAIN -2 #define ERR_CFG_USE -9003 struct device_node *dlpar_configure_connector(__be32 drc_index, @@ -172,9 +173,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index, spin_unlock(&rtas_data_buf_lock); - if (rtas_busy_delay(rc)) - continue; - switch (rc) { case COMPLETE: break; @@ -227,6 +225,9 @@ struct device_node *dlpar_configure_connector(__be32 drc_index, parent_path = last_dn->parent->full_name; break; + case CALL_AGAIN: + break; + case MORE_MEMORY: case ERR_CFG_USE: default: diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index baaeb753fa79..5d4a3df59d0c 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -98,7 +98,6 @@ 
EXPORT_SYMBOL_GPL(init_phb_dynamic); int remove_phb_dynamic(struct pci_controller *phb) { struct pci_bus *b = phb->bus; - struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge); struct resource *res; int rc, i; @@ -125,8 +124,7 @@ int remove_phb_dynamic(struct pci_controller *phb) /* Remove the PCI bus and unregister the bridge device from sysfs */ phb->bus = NULL; pci_remove_bus(b); - host_bridge->bus = NULL; - device_unregister(&host_bridge->dev); + device_unregister(b->bridge); /* Now release the IO resource */ if (res->flags & IORESOURCE_IO) diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c index 0a0e0c8256f6..e76aefae2aa2 100644 --- a/arch/powerpc/platforms/pseries/suspend.c +++ b/arch/powerpc/platforms/pseries/suspend.c @@ -224,6 +224,7 @@ static struct bus_type suspend_subsys = { static const struct platform_suspend_ops pseries_suspend_ops = { .valid = suspend_valid_only_mem, + .begin = pseries_suspend_begin, .prepare_late = pseries_prepare_late, .enter = pseries_suspend_enter, }; diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S index 3943d19d5f63..d3098ef1404a 100644 --- a/arch/powerpc/sysdev/dcr-low.S +++ b/arch/powerpc/sysdev/dcr-low.S @@ -14,7 +14,7 @@ #include #define DCR_ACCESS_PROLOG(table) \ - cmplwi cr0,r3,1024; \ + cmpli cr0,r3,1024; \ rlwinm r3,r3,4,18,27; \ lis r5,table@h; \ ori r5,r5,table@l; \ diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index 3140095ee757..994fe73c2ed0 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev) /* IO map the message register block. 
*/ of_address_to_resource(np, 0, &rsrc); - msgr_block_addr = devm_ioremap(&dev->dev, rsrc.start, resource_size(&rsrc)); + msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc)); if (!msgr_block_addr) { dev_err(&dev->dev, "Failed to iomap MPIC message registers"); return -EFAULT; diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 73a301873fbf..e92a684e855d 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -835,7 +835,7 @@ config CMM_IUCV config APPLDATA_BASE def_bool n prompt "Linux - VM Monitor Stream, base infrastructure" - depends on PROC_SYSCTL + depends on PROC_FS help This provides a kernel interface for creating and updating z/VM APPLDATA monitor records. The monitor records are updated at certain time diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c index 7f53e40597f9..44feac38ccfc 100644 --- a/arch/s390/hypfs/hypfs_vm.c +++ b/arch/s390/hypfs/hypfs_vm.c @@ -19,7 +19,6 @@ static char local_guest[] = " "; static char all_guests[] = "* "; -static char *all_groups = all_guests; static char *guest_query; struct diag2fc_data { @@ -62,11 +61,10 @@ static int diag2fc(int size, char* query, void *addr) memcpy(parm_list.userid, query, NAME_LEN); ASCEBC(parm_list.userid, NAME_LEN); - memcpy(parm_list.aci_grp, all_groups, NAME_LEN); - ASCEBC(parm_list.aci_grp, NAME_LEN); - parm_list.addr = (unsigned long)addr; + parm_list.addr = (unsigned long) addr ; parm_list.size = size; parm_list.fmt = 0x02; + memset(parm_list.aci_grp, 0x40, NAME_LEN); rc = -1; diag_stat_inc(DIAG_STAT_X2FC); diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 6dd874d5ba7b..836c56290499 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -19,7 +19,6 @@ void ftrace_caller(void); extern char ftrace_graph_caller_end; extern unsigned long ftrace_plt; -extern void *ftrace_func; struct dyn_arch_ftrace { }; diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 
687a4567d4ad..7a92e69c50bc 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -97,19 +97,6 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) { free_page_and_swap_cache(page); } -static inline void tlb_flush_pmd_range(struct mmu_gather *tlb, - unsigned long address, unsigned long size) -{ - /* - * the range might exceed the original range that was provided to - * tlb_gather_mmu(), so we need to update it despite the fact it is - * usually not updated. - */ - if (tlb->start > address) - tlb->start = address; - if (tlb->end < address + size) - tlb->end = address + size; -} /* * pte_free_tlb frees a pte table and clears the CRSTE for the diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index c15546c6fb66..7f768914fb4f 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c @@ -37,12 +37,10 @@ static int diag8_noresponse(int cmdlen) static int diag8_response(int cmdlen, char *response, int *rlen) { - unsigned long _cmdlen = cmdlen | 0x40000000L; - unsigned long _rlen = *rlen; register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf; register unsigned long reg3 asm ("3") = (addr_t) response; - register unsigned long reg4 asm ("4") = _cmdlen; - register unsigned long reg5 asm ("5") = _rlen; + register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L; + register unsigned long reg5 asm ("5") = *rlen; asm volatile( " sam31\n" diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index c4def044f27b..07477ba392b7 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -2025,7 +2025,7 @@ void show_code(struct pt_regs *regs) start += opsize; printk(buffer); ptr = buffer; - ptr += sprintf(ptr, "\n "); + ptr += sprintf(ptr, "\n\t "); hops++; } printk("\n"); @@ -2033,7 +2033,7 @@ void show_code(struct pt_regs *regs) void print_fn_code(unsigned char *code, unsigned long len) { - char buffer[128], *ptr; + char buffer[64], *ptr; int opsize, i; while (len) { diff --git 
a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index d43f18b3d42c..4cad1adff16b 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -889,7 +889,6 @@ ENTRY(ext_int_handler) * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. */ ENTRY(psw_idle) - stg %r14,(__SF_GPRS+8*8)(%r15) stg %r3,__SF_EMPTY(%r15) larl %r1,.Lpsw_idle_lpsw+4 stg %r1,__SF_EMPTY+8(%r15) diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 6617fae13bd3..e0eaf11134b4 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -55,7 +55,6 @@ * > brasl %r0,ftrace_caller # offset 0 */ -void *ftrace_func __read_mostly = ftrace_stub; unsigned long ftrace_plt; static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn) @@ -165,7 +164,6 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) int ftrace_update_ftrace_func(ftrace_func_t func) { - ftrace_func = func; return 0; } diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index cbc187706648..083b05f5f5ab 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c @@ -43,7 +43,7 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected, unsigned char *ipe = (unsigned char *)expected; unsigned char *ipn = (unsigned char *)new; - pr_emerg("Jump label code mismatch at %pS [%px]\n", ipc, ipc); + pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); pr_emerg("Found: %6ph\n", ipc); pr_emerg("Expected: %6ph\n", ipe); pr_emerg("New: %6ph\n", ipn); diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 9eb55077896c..68425e68e65a 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -56,13 +56,13 @@ ENTRY(ftrace_caller) #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES aghik %r2,%r0,-MCOUNT_INSN_SIZE lgrl %r4,function_trace_op - lgrl %r1,ftrace_func + lgrl %r1,ftrace_trace_function #else lgr %r2,%r0 aghi %r2,-MCOUNT_INSN_SIZE larl %r4,function_trace_op lg 
%r4,0(%r4) - larl %r1,ftrace_func + larl %r1,ftrace_trace_function lg %r1,0(%r1) #endif lgr %r3,%r14 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index a765b4936c10..fdc5e76e1f6b 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -687,6 +687,9 @@ static void __init setup_memory(void) storage_key_init_range(reg->base, reg->base + reg->size); } psw_set_key(PAGE_DEFAULT_KEY); + + /* Only cosmetics */ + memblock_enforce_memory_limit(memblock_end_of_DRAM()); } /* diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 486f0d4f9aee..f113fcd781d8 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -738,7 +738,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail, static int __smp_rescan_cpus(struct sclp_core_info *info, bool early) { struct sclp_core_entry *core; - static cpumask_t avail; + cpumask_t avail; bool configured; u16 core_id; int nr, i; diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 1593133c8c88..ef03726cc661 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -21,14 +21,17 @@ /** * kvm_s390_real_to_abs - convert guest real address to guest absolute address - * @prefix - guest prefix + * @vcpu - guest virtual cpu * @gra - guest real address * * Returns the guest absolute address that corresponds to the passed guest real - * address @gra of by applying the given prefix. + * address @gra of a virtual guest cpu by applying its prefix. 
*/ -static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra) +static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, + unsigned long gra) { + unsigned long prefix = kvm_s390_get_prefix(vcpu); + if (gra < 2 * PAGE_SIZE) gra += prefix; else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE) @@ -36,20 +39,6 @@ static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra) return gra; } -/** - * kvm_s390_real_to_abs - convert guest real address to guest absolute address - * @vcpu - guest virtual cpu - * @gra - guest real address - * - * Returns the guest absolute address that corresponds to the passed guest real - * address @gra of a virtual guest cpu by applying its prefix. - */ -static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, - unsigned long gra) -{ - return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra); -} - /** * kvm_s390_logical_to_effective - convert guest logical to effective address * @vcpu: guest virtual cpu diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c index ac67fa0f2e4d..b647d5ff0ad9 100644 --- a/arch/s390/lib/string.c +++ b/arch/s390/lib/string.c @@ -225,13 +225,14 @@ EXPORT_SYMBOL(strcmp); */ char * strrchr(const char * s, int c) { - ssize_t len = __strend(s) - s; - - do { - if (s[len] == (char)c) - return (char *)s + len; - } while (--len >= 0); - return NULL; + size_t len = __strend(s) - s; + + if (len) + do { + if (s[len] == (char) c) + return (char *) s + len; + } while (--len > 0); + return NULL; } EXPORT_SYMBOL(strrchr); diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 03ad0455931d..bcf409997d6d 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -115,7 +115,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) { u32 r1 = reg2hex[b1]; - if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1]) + if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15) jit->seen_reg[r1] = 1; } @@ -596,10 +596,10 @@ 
static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT4(0xb9080000, dst_reg, src_reg); break; case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */ - if (imm != 0) { - /* alfi %dst,imm */ - EMIT6_IMM(0xc20b0000, dst_reg, imm); - } + if (!imm) + break; + /* alfi %dst,imm */ + EMIT6_IMM(0xc20b0000, dst_reg, imm); EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */ @@ -621,22 +621,17 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT4(0xb9090000, dst_reg, src_reg); break; case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */ - if (imm != 0) { - /* alfi %dst,-imm */ - EMIT6_IMM(0xc20b0000, dst_reg, -imm); - } + if (!imm) + break; + /* alfi %dst,-imm */ + EMIT6_IMM(0xc20b0000, dst_reg, -imm); EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */ if (!imm) break; - if (imm == -0x80000000) { - /* algfi %dst,0x80000000 */ - EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000); - } else { - /* agfi %dst,-imm */ - EMIT6_IMM(0xc2080000, dst_reg, -imm); - } + /* agfi %dst,-imm */ + EMIT6_IMM(0xc2080000, dst_reg, -imm); break; /* * BPF_MUL @@ -651,10 +646,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT4(0xb90c0000, dst_reg, src_reg); break; case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */ - if (imm != 1) { - /* msfi %r5,imm */ - EMIT6_IMM(0xc2010000, dst_reg, imm); - } + if (imm == 1) + break; + /* msfi %r5,imm */ + EMIT6_IMM(0xc2010000, dst_reg, imm); EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */ @@ -715,8 +710,6 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i if (BPF_OP(insn->code) == BPF_MOD) /* lhgi %dst,0 */ EMIT4_IMM(0xa7090000, dst_reg, 0); - else - EMIT_ZERO(dst_reg); break; } /* lhi %w0,0 */ @@ -809,10 +802,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i 
EMIT4(0xb9820000, dst_reg, src_reg); break; case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */ - if (imm != 0) { - /* xilf %dst,imm */ - EMIT6_IMM(0xc0070000, dst_reg, imm); - } + if (!imm) + break; + /* xilf %dst,imm */ + EMIT6_IMM(0xc0070000, dst_reg, imm); EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */ @@ -833,10 +826,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0); break; case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */ - if (imm != 0) { - /* sll %dst,imm(%r0) */ - EMIT4_DISP(0x89000000, dst_reg, REG_0, imm); - } + if (imm == 0) + break; + /* sll %dst,imm(%r0) */ + EMIT4_DISP(0x89000000, dst_reg, REG_0, imm); EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */ @@ -858,10 +851,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0); break; case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */ - if (imm != 0) { - /* srl %dst,imm(%r0) */ - EMIT4_DISP(0x88000000, dst_reg, REG_0, imm); - } + if (imm == 0) + break; + /* srl %dst,imm(%r0) */ + EMIT4_DISP(0x88000000, dst_reg, REG_0, imm); EMIT_ZERO(dst_reg); break; case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */ diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index c50c397cbcf7..5f2bb4242c0f 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -60,7 +60,6 @@ config DUMP_CODE config DWARF_UNWINDER bool "Enable the DWARF unwinder for stacktraces" - depends on DEBUG_KERNEL select FRAME_POINTER depends on SUPERH32 default n diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig index ac834e9e0e0a..78bc97b1d027 100644 --- a/arch/sh/drivers/dma/Kconfig +++ b/arch/sh/drivers/dma/Kconfig @@ -62,7 +62,8 @@ config PVR2_DMA config G2_DMA tristate "G2 Bus DMA support" - 
depends on SH_DREAMCAST && SH_DMA_API + depends on SH_DREAMCAST + select SH_DMA_API help This enables support for the DMA controller for the Dreamcast's G2 bus. Drivers that want this will generally enable this on diff --git a/arch/sh/include/asm/sfp-machine.h b/arch/sh/include/asm/sfp-machine.h index dd195c6f3b9d..d3c548443f2a 100644 --- a/arch/sh/include/asm/sfp-machine.h +++ b/arch/sh/include/asm/sfp-machine.h @@ -25,14 +25,6 @@ #ifndef _SFP_MACHINE_H #define _SFP_MACHINE_H -#ifdef __BIG_ENDIAN__ -#define __BYTE_ORDER __BIG_ENDIAN -#define __LITTLE_ENDIAN 0 -#else -#define __BYTE_ORDER __LITTLE_ENDIAN -#define __BIG_ENDIAN 0 -#endif - #define _FP_W_TYPE_SIZE 32 #define _FP_W_TYPE unsigned long #define _FP_WS_TYPE signed long diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index 3ee32d21fe9f..62f80d2a9df9 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h @@ -65,16 +65,6 @@ tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address) tlb->end = address + PAGE_SIZE; } -static inline void -tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address, - unsigned long size) -{ - if (tlb->start > address) - tlb->start = address; - if (tlb->end < address + size) - tlb->end = address + size; -} - /* * In the case of tlb vma handling, we can optimise these away in the * case where we're doing a full MM flush. 
When we're doing a munmap, diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c index 4a1cee5da2dc..4a298808789c 100644 --- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c @@ -78,9 +78,8 @@ static void shx3_prepare_cpus(unsigned int max_cpus) BUILD_BUG_ON(SMP_MSG_NR >= 8); for (i = 0; i < SMP_MSG_NR; i++) - if (request_irq(104 + i, ipi_interrupt_handler, - IRQF_PERCPU, "IPI", (void *)(long)i)) - pr_err("Failed to request irq %d\n", i); + request_irq(104 + i, ipi_interrupt_handler, + IRQF_PERCPU, "IPI", (void *)(long)i); for (i = 0; i < max_cpus; i++) set_cpu_present(i, true); diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 4209f72c1973..a1eb588fd46f 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -541,7 +541,7 @@ config COMPAT bool depends on SPARC64 default y - select COMPAT_BINFMT_ELF if BINFMT_ELF + select COMPAT_BINFMT_ELF select HAVE_UID16 select ARCH_WANT_OLD_COMPAT_IPC select COMPAT_OLD_SIGACTION diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 75445ba7e237..6f80936e0eea 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -37,7 +37,6 @@ struct mdesc_hdr { u32 node_sz; /* node block size */ u32 name_sz; /* name block size */ u32 data_sz; /* data block size */ - char data[]; } __attribute__((aligned(16))); struct mdesc_elem { @@ -370,7 +369,7 @@ out: static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc) { - return (struct mdesc_elem *) mdesc->data; + return (struct mdesc_elem *) (mdesc + 1); } static void *name_block(struct mdesc_hdr *mdesc) diff --git a/arch/sparc/lib/iomap.c b/arch/sparc/lib/iomap.c index fa4abbaf27de..c4d42a50ebc0 100644 --- a/arch/sparc/lib/iomap.c +++ b/arch/sparc/lib/iomap.c @@ -18,10 +18,8 @@ void ioport_unmap(void __iomem *addr) EXPORT_SYMBOL(ioport_map); EXPORT_SYMBOL(ioport_unmap); -#ifdef CONFIG_PCI void pci_iounmap(struct pci_dev *dev, void __iomem * addr) { /* nothing to do */ } 
EXPORT_SYMBOL(pci_iounmap); -#endif diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S index 19000777f7c9..f75e6906df14 100644 --- a/arch/sparc/lib/memset.S +++ b/arch/sparc/lib/memset.S @@ -137,7 +137,6 @@ __bzero: ZERO_LAST_BLOCKS(%o0, 0x48, %g2) ZERO_LAST_BLOCKS(%o0, 0x08, %g2) 13: - EXT(12b, 13b, 21f) be 8f andcc %o1, 4, %g0 diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c index feb7f5ab4084..3fd7c3efdb18 100644 --- a/arch/um/drivers/chan_user.c +++ b/arch/um/drivers/chan_user.c @@ -256,8 +256,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out, goto out_close; } - err = os_set_fd_block(*fd_out, 0); - if (err) { + if (os_set_fd_block(*fd_out, 0)) { printk(UM_KERN_ERR "winch_tramp: failed to set thread_fd " "non-blocking.\n"); goto out_close; diff --git a/arch/um/drivers/slip_user.c b/arch/um/drivers/slip_user.c index 76d155631c5d..0d6b66c64a81 100644 --- a/arch/um/drivers/slip_user.c +++ b/arch/um/drivers/slip_user.c @@ -145,8 +145,7 @@ static int slip_open(void *data) } sfd = err; - err = set_up_tty(sfd); - if (err) + if (set_up_tty(sfd)) goto out_close2; pri->slave = sfd; diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c index e3b422ebce09..20e30be44795 100644 --- a/arch/um/drivers/xterm.c +++ b/arch/um/drivers/xterm.c @@ -18,7 +18,6 @@ struct xterm_chan { int pid; int helper_pid; - int chan_fd; char *title; int device; int raw; @@ -34,7 +33,6 @@ static void *xterm_init(char *str, int device, const struct chan_opts *opts) return NULL; *data = ((struct xterm_chan) { .pid = -1, .helper_pid = -1, - .chan_fd = -1, .device = device, .title = opts->xterm_title, .raw = opts->raw } ); @@ -151,7 +149,6 @@ static int xterm_open(int input, int output, int primary, void *d, goto out_kill; } - data->chan_fd = fd; new = xterm_fd(fd, &data->helper_pid); if (new < 0) { err = new; @@ -209,8 +206,6 @@ static void xterm_close(int fd, void *d) os_kill_process(data->helper_pid, 0); data->helper_pid = -1; - if 
(data->chan_fd != -1) - os_close_file(data->chan_fd); os_close_file(fd); } diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index f9d7e92dbac9..16eb63fac57d 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h @@ -110,18 +110,6 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) __tlb_remove_page(tlb, page); } -static inline void -tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address, - unsigned long size) -{ - tlb->need_flush = 1; - - if (tlb->start > address) - tlb->start = address; - if (tlb->end < address + size) - tlb->end = address + size; -} - /** * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. * diff --git a/arch/um/include/shared/registers.h b/arch/um/include/shared/registers.h index 089f979e112e..f5b76355ad71 100644 --- a/arch/um/include/shared/registers.h +++ b/arch/um/include/shared/registers.h @@ -14,8 +14,8 @@ extern int restore_fp_registers(int pid, unsigned long *fp_regs); extern int save_fpx_registers(int pid, unsigned long *fp_regs); extern int restore_fpx_registers(int pid, unsigned long *fp_regs); extern int save_registers(int pid, struct uml_pt_regs *regs); -extern int restore_pid_registers(int pid, struct uml_pt_regs *regs); -extern int init_pid_registers(int pid); +extern int restore_registers(int pid, struct uml_pt_regs *regs); +extern int init_registers(int pid); extern void get_safe_registers(unsigned long *regs, unsigned long *fp_regs); extern unsigned long get_thread_reg(int reg, jmp_buf *buf); extern int get_fp_registers(int pid, unsigned long *regs); diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S index d837636ec823..adde088aeeff 100644 --- a/arch/um/kernel/dyn.lds.S +++ b/arch/um/kernel/dyn.lds.S @@ -6,12 +6,6 @@ OUTPUT_ARCH(ELF_ARCH) ENTRY(_start) jiffies = jiffies_64; -VERSION { - { - local: *; - }; -} - SECTIONS { PROVIDE (__executable_start = START); diff --git a/arch/um/kernel/uml.lds.S 
b/arch/um/kernel/uml.lds.S index 09620aa953ca..6899195602b7 100644 --- a/arch/um/kernel/uml.lds.S +++ b/arch/um/kernel/uml.lds.S @@ -6,12 +6,6 @@ OUTPUT_ARCH(ELF_ARCH) ENTRY(_start) jiffies = jiffies_64; -VERSION { - { - local: *; - }; -} - SECTIONS { /* This must contain the right address - not quite the default ELF one.*/ diff --git a/arch/um/os-Linux/registers.c b/arch/um/os-Linux/registers.c index 34a5963bd7ef..2ff8d4fe83c4 100644 --- a/arch/um/os-Linux/registers.c +++ b/arch/um/os-Linux/registers.c @@ -21,7 +21,7 @@ int save_registers(int pid, struct uml_pt_regs *regs) return 0; } -int restore_pid_registers(int pid, struct uml_pt_regs *regs) +int restore_registers(int pid, struct uml_pt_regs *regs) { int err; @@ -36,7 +36,7 @@ int restore_pid_registers(int pid, struct uml_pt_regs *regs) static unsigned long exec_regs[MAX_REG_NR]; static unsigned long exec_fp_regs[FP_SIZE]; -int init_pid_registers(int pid) +int init_registers(int pid) { int err; diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c index dc06933ba63d..22a358ef1b0c 100644 --- a/arch/um/os-Linux/start_up.c +++ b/arch/um/os-Linux/start_up.c @@ -334,7 +334,7 @@ void __init os_early_checks(void) check_tmpexec(); pid = start_ptraced_child(); - if (init_pid_registers(pid)) + if (init_registers(pid)) fatal("Failed to initialize default registers"); stop_ptraced_child(pid, 1, 1); } diff --git a/arch/x86/Makefile b/arch/x86/Makefile index e94f8ed5eea5..0a3081d64855 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -34,13 +34,12 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \ -DDISABLE_BRANCH_PROFILING \ -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ - -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none) + -mno-mmx -mno-sse REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding) REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector) REALMODE_CFLAGS += 
$(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member) REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4)) -REALMODE_CFLAGS += $(CLANG_FLAGS) export REALMODE_CFLAGS # BITS is used as extension for files which are available in a 32 bit @@ -62,9 +61,6 @@ endif KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow KBUILD_CFLAGS += $(call cc-option,-mno-avx,) -# Intel CET isn't enabled in the kernel -KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) - ifeq ($(CONFIG_X86_32),y) BITS := 32 UTS_MACHINE := i386 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index b9d8d72d397e..5993813c733d 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -34,8 +34,6 @@ KBUILD_CFLAGS += -mno-mmx -mno-sse KBUILD_CFLAGS += $(call cc-option,-ffreestanding) KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) -# Disable relocation relaxation in case the link is not PIE. 
-KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig index 6f099c7218a4..007d0867fd6a 100644 --- a/arch/x86/configs/x86_64_cuttlefish_defconfig +++ b/arch/x86/configs/x86_64_cuttlefish_defconfig @@ -18,7 +18,6 @@ CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_SCHEDTUNE=y CONFIG_CGROUP_SCHED=y CONFIG_RT_GROUP_SCHED=y -CONFIG_BLK_CGROUP=y CONFIG_NAMESPACES=y CONFIG_SCHED_TUNE=y CONFIG_BLK_DEV_INITRD=y @@ -49,10 +48,11 @@ CONFIG_PREEMPT=y # CONFIG_MICROCODE is not set CONFIG_X86_MSR=y CONFIG_X86_CPUID=y +CONFIG_KSM=y CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_ZSMALLOC=y # CONFIG_MTRR is not set -CONFIG_EFI=y CONFIG_HZ_100=y CONFIG_KEXEC=y CONFIG_CRASH_DUMP=y @@ -60,7 +60,7 @@ CONFIG_PHYSICAL_START=0x200000 CONFIG_RANDOMIZE_BASE=y CONFIG_PHYSICAL_ALIGN=0x1000000 CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="nopti" +CONFIG_CMDLINE="console=ttyS0 reboot=p nopti" CONFIG_PM_AUTOSLEEP=y CONFIG_PM_WAKELOCKS=y CONFIG_PM_WAKELOCKS_LIMIT=0 @@ -206,7 +206,6 @@ CONFIG_OF_UNITTEST=y # CONFIG_PNP_DEBUG_MESSAGES is not set CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_LOOP_MIN_COUNT=16 CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_VIRTIO_BLK=y @@ -229,7 +228,6 @@ CONFIG_DM_ZERO=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y -CONFIG_DM_VERITY_AVB=y CONFIG_DM_ANDROID_VERITY=y CONFIG_NETDEVICES=y CONFIG_NETCONSOLE=y @@ -427,7 +425,6 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y -# CONFIG_EFIVAR_FS is not set CONFIG_SDCARD_FS=y CONFIG_PSTORE=y CONFIG_PSTORE_CONSOLE=y diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 0a86120c1016..9fe3fcd55242 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -57,7 +57,7 @@ ENDPROC(native_usergs_sysret64) .macro TRACE_IRQS_IRETQ #ifdef 
CONFIG_TRACE_IRQFLAGS - btl $9, EFLAGS(%rsp) /* interrupts off? */ + bt $9, EFLAGS(%rsp) /* interrupts off? */ jnc 1f TRACE_IRQS_ON 1: diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 34f11bc42d9b..3328a37ddc75 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -168,6 +168,16 @@ static inline void disable_local_APIC(void) { } #endif /* !CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_X2APIC +/* + * Make previous memory operations globally visible before + * sending the IPI through x2apic wrmsr. We need a serializing instruction or + * mfence for this. + */ +static inline void x2apic_wrmsr_fence(void) +{ + asm volatile("mfence" : : : "memory"); +} + static inline void native_apic_msr_write(u32 reg, u32 v) { if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 47cb64dd319a..afc2387323c9 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -77,7 +77,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v) */ static __always_inline int atomic_sub_and_test(int i, atomic_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e); + GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e"); } /** @@ -114,7 +114,7 @@ static __always_inline void atomic_dec(atomic_t *v) */ static __always_inline int atomic_dec_and_test(atomic_t *v) { - GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e); + GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e"); } /** @@ -127,7 +127,7 @@ static __always_inline int atomic_dec_and_test(atomic_t *v) */ static __always_inline int atomic_inc_and_test(atomic_t *v) { - GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e); + GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e"); } /** @@ -141,7 +141,7 @@ static __always_inline int atomic_inc_and_test(atomic_t *v) */ static __always_inline int atomic_add_negative(int i, atomic_t *v) { 
- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s); + GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s"); } /** diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index fbb9a82599ab..377fa50cc271 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -72,7 +72,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) */ static inline int atomic64_sub_and_test(long i, atomic64_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e); + GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e"); } /** @@ -111,7 +111,7 @@ static __always_inline void atomic64_dec(atomic64_t *v) */ static inline int atomic64_dec_and_test(atomic64_t *v) { - GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e); + GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e"); } /** @@ -124,7 +124,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v) */ static inline int atomic64_inc_and_test(atomic64_t *v) { - GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e); + GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e"); } /** @@ -138,7 +138,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v) */ static inline int atomic64_add_negative(long i, atomic64_t *v) { - GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s); + GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s"); } /** diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 134d7ffc662e..b2a5bef74282 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -119,22 +119,4 @@ do { \ #define smp_mb__before_atomic() do { } while (0) #define smp_mb__after_atomic() do { } while (0) -/* - * Make previous memory operations globally visible before - * a WRMSR. - * - * MFENCE makes writes visible, but only affects load/store - * instructions. 
WRMSR is unfortunately not a load/store - * instruction and is unaffected by MFENCE. The LFENCE ensures - * that the WRMSR is not reordered. - * - * Most WRMSRs are full serializing instructions themselves and - * do not require this barrier. This is only required for the - * IA32_TSC_DEADLINE and X2APIC MSRs. - */ -static inline void weak_wrmsr_fence(void) -{ - asm volatile("mfence; lfence" : : : "memory"); -} - #endif /* _ASM_X86_BARRIER_H */ diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 390e323a4de9..cfe3b954d5e4 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -77,7 +77,7 @@ set_bit(long nr, volatile unsigned long *addr) : "iq" ((u8)CONST_MASK(nr)) : "memory"); } else { - asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" + asm volatile(LOCK_PREFIX "bts %1,%0" : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); } } @@ -93,7 +93,7 @@ set_bit(long nr, volatile unsigned long *addr) */ static inline void __set_bit(long nr, volatile unsigned long *addr) { - asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory"); + asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); } /** @@ -114,7 +114,7 @@ clear_bit(long nr, volatile unsigned long *addr) : CONST_MASK_ADDR(nr, addr) : "iq" ((u8)~CONST_MASK(nr))); } else { - asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" + asm volatile(LOCK_PREFIX "btr %1,%0" : BITOP_ADDR(addr) : "Ir" (nr)); } @@ -136,7 +136,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) static inline void __clear_bit(long nr, volatile unsigned long *addr) { - asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr)); + asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); } /* @@ -168,7 +168,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) */ static inline void __change_bit(long nr, volatile unsigned long *addr) { - asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr)); + asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); } /** @@ 
-187,7 +187,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr) : CONST_MASK_ADDR(nr, addr) : "iq" ((u8)CONST_MASK(nr))); } else { - asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" + asm volatile(LOCK_PREFIX "btc %1,%0" : BITOP_ADDR(addr) : "Ir" (nr)); } @@ -203,8 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr) */ static inline int test_and_set_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), - *addr, "Ir", nr, "%0", c); + GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c"); } /** @@ -233,7 +232,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr) { int oldbit; - asm(__ASM_SIZE(bts) " %2,%1\n\t" + asm("bts %2,%1\n\t" "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr)); @@ -250,8 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr) */ static inline int test_and_clear_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), - *addr, "Ir", nr, "%0", c); + GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c"); } /** @@ -274,7 +272,7 @@ static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr) { int oldbit; - asm volatile(__ASM_SIZE(btr) " %2,%1\n\t" + asm volatile("btr %2,%1\n\t" "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr)); @@ -286,7 +284,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr) { int oldbit; - asm volatile(__ASM_SIZE(btc) " %2,%1\n\t" + asm volatile("btc %2,%1\n\t" "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory"); @@ -304,8 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr) */ static inline int test_and_change_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), - *addr, "Ir", nr, "%0", c); + GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c"); } static __always_inline int constant_test_bit(long 
nr, const volatile unsigned long *addr) @@ -318,7 +315,7 @@ static inline int variable_test_bit(long nr, volatile const unsigned long *addr) { int oldbit; - asm volatile(__ASM_SIZE(bt) " %2,%1\n\t" + asm volatile("bt %2,%1\n\t" "sbb %0,%0" : "=r" (oldbit) : "m" (*(unsigned long *)addr), "Ir" (nr)); diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 4fb38927128c..66a5e60f60c4 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -217,14 +217,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) } } -static inline void fxsave(struct fxregs_state *fx) -{ - if (IS_ENABLED(CONFIG_X86_32)) - asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx)); - else - asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx)); -} - /* These macros all use (%edi)/(%rdi) as the single memory argument. */ #define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27" #define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37" @@ -294,6 +286,28 @@ static inline void fxsave(struct fxregs_state *fx) : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ : "memory") +/* + * This function is called only during boot time when x86 caps are not set + * up and alternative can not be used yet. + */ +static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate) +{ + u64 mask = -1; + u32 lmask = mask; + u32 hmask = mask >> 32; + int err; + + WARN_ON(system_state != SYSTEM_BOOTING); + + if (static_cpu_has(X86_FEATURE_XSAVES)) + XSTATE_OP(XSAVES, xstate, lmask, hmask, err); + else + XSTATE_OP(XSAVE, xstate, lmask, hmask, err); + + /* We should never fault when copying to a kernel buffer: */ + WARN_ON_FPU(err); +} + /* * This function is called only during boot time when x86 caps are not set * up and alternative can not be used yet. 
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h index 6db02d52cdf4..5a51fcbbe563 100644 --- a/arch/x86/include/asm/insn.h +++ b/arch/x86/include/asm/insn.h @@ -198,21 +198,6 @@ static inline int insn_offset_immediate(struct insn *insn) return insn_offset_displacement(insn) + insn->displacement.nbytes; } -/** - * for_each_insn_prefix() -- Iterate prefixes in the instruction - * @insn: Pointer to struct insn. - * @idx: Index storage. - * @prefix: Prefix byte. - * - * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix - * and the index is stored in @idx (note that this @idx is just for a cursor, - * do not change it.) - * Since prefixes.nbytes can be bigger than 4 if some prefixes - * are repeated, it cannot be used for looping over the prefixes. - */ -#define for_each_insn_prefix(insn, idx, prefix) \ - for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++) - #define POP_SS_OPCODE 0x1f #define MOV_SREG_OPCODE 0x8e diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h index 53238f0da79e..4ad6560847b1 100644 --- a/arch/x86/include/asm/local.h +++ b/arch/x86/include/asm/local.h @@ -52,7 +52,7 @@ static inline void local_sub(long i, local_t *l) */ static inline int local_sub_and_test(long i, local_t *l) { - GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", e); + GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e"); } /** @@ -65,7 +65,7 @@ static inline int local_sub_and_test(long i, local_t *l) */ static inline int local_dec_and_test(local_t *l) { - GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", e); + GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e"); } /** @@ -78,7 +78,7 @@ static inline int local_dec_and_test(local_t *l) */ static inline int local_inc_and_test(local_t *l) { - GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", e); + GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e"); } /** @@ -92,7 +92,7 @@ static inline int 
local_inc_and_test(local_t *l) */ static inline int local_add_negative(long i, local_t *l) { - GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", s); + GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s"); } /** diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 67a140d77f33..fb1251946b45 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -15,7 +15,7 @@ #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #define CURRENT_MASK (~(THREAD_SIZE - 1)) -#define EXCEPTION_STACK_ORDER (1 + KASAN_STACK_ORDER) +#define EXCEPTION_STACK_ORDER (0 + KASAN_STACK_ORDER) #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 66cd0c862a80..f5e780bfa2b3 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -534,7 +534,7 @@ static inline int x86_this_cpu_variable_test_bit(int nr, { int oldbit; - asm volatile("btl "__percpu_arg(2)",%1\n\t" + asm volatile("bt "__percpu_arg(2)",%1\n\t" "sbb %0,%0" : "=r" (oldbit) : "m" (*(unsigned long *)addr), "Ir" (nr)); diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index ad6661ca315d..01bcde84d3e4 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -81,7 +81,7 @@ static __always_inline void __preempt_count_sub(int val) */ static __always_inline bool __preempt_count_dec_and_test(void) { - GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e); + GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); } /* diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index ae6f1592530b..a4a77286cb1d 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h @@ -3,8 +3,6 @@ #include -struct task_struct; - /* misc architecture specific prototypes */ void syscall_init(void); diff --git 
a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h index cb0dce0273c8..8f7866a5b9a4 100644 --- a/arch/x86/include/asm/rmwcc.h +++ b/arch/x86/include/asm/rmwcc.h @@ -5,7 +5,7 @@ #define __GEN_RMWcc(fullop, var, cc, ...) \ do { \ - asm_volatile_goto (fullop "; j" #cc " %l[cc_label]" \ + asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \ : : "m" (var), ## __VA_ARGS__ \ : "memory" : cc_label); \ return 0; \ @@ -24,7 +24,7 @@ cc_label: \ #define __GEN_RMWcc(fullop, var, cc, ...) \ do { \ char c; \ - asm volatile (fullop "; set" #cc " %1" \ + asm volatile (fullop "; set" cc " %1" \ : "+m" (var), "=qm" (c) \ : __VA_ARGS__ : "memory"); \ return c != 0; \ diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index c1adb2ed6d41..6136d99f537b 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -108,8 +108,6 @@ struct __attribute__ ((__packed__)) vmcb_control_area { #define V_IGN_TPR_SHIFT 20 #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT) -#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK) - #define V_INTR_MASKING_SHIFT 24 #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 33a594f728de..8dab88b85785 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -245,15 +245,12 @@ static inline void __native_flush_tlb_single(unsigned long addr) * ASID. But, userspace flushes are probably much more * important performance-wise. * - * In the KAISER disabled case, do an INVLPG to make sure - * the mapping is flushed in case it is a global one. + * Make sure to do only a single invpcid when KAISER is + * disabled and we have only a single ASID. 
*/ - if (kaiser_enabled) { + if (kaiser_enabled) invpcid_flush_one(X86_CR3_PCID_ASID_USER, addr); - invpcid_flush_one(X86_CR3_PCID_ASID_KERN, addr); - } else { - asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); - } + invpcid_flush_one(X86_CR3_PCID_ASID_KERN, addr); } static inline void __flush_tlb_all(void) diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index f53849f3f7fb..4dcf71c26d64 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -41,7 +41,6 @@ #include #include #include -#include #include #include #include @@ -465,9 +464,6 @@ static int lapic_next_deadline(unsigned long delta, { u64 tsc; - /* This MSR is special and need a special fence: */ - weak_wrmsr_fence(); - tsc = rdtsc(); wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR)); return 0; diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 497ad354e123..5e8fc9809da3 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1040,16 +1040,6 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin, if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) { irq = mp_irqs[idx].srcbusirq; legacy = mp_is_legacy_irq(irq); - /* - * IRQ2 is unusable for historical reasons on systems which - * have a legacy PIC. See the comment vs. IRQ2 further down. - * - * If this gets removed at some point then the related code - * in lapic_assign_system_vectors() needs to be adjusted as - * well. 
- */ - if (legacy && irq == PIC_CASCADE_IR) - return -EINVAL; } mutex_lock(&ioapic_mutex); diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index f474756fc151..cc8311c4d298 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -32,8 +32,7 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) unsigned long flags; u32 dest; - /* x2apic MSRs are special and need a special fence: */ - weak_wrmsr_fence(); + x2apic_wrmsr_fence(); local_irq_save(flags); diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index ad7c3544b07f..662e9150ea6f 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -43,8 +43,7 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest) unsigned long this_cpu; unsigned long flags; - /* x2apic MSRs are special and need a special fence: */ - weak_wrmsr_fence(); + x2apic_wrmsr_fence(); local_irq_save(flags); diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index e2fa0fcbaa69..55d499593e6a 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c @@ -166,6 +166,9 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end, *repeat = 0; *uniform = 1; + /* Make end inclusive instead of exclusive */ + end--; + prev_match = MTRR_TYPE_INVALID; for (i = 0; i < num_var_ranges; ++i) { unsigned short start_state, end_state, inclusive; @@ -257,9 +260,6 @@ u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform) int repeat; u64 partial_end; - /* Make end inclusive instead of exclusive */ - end--; - if (!mtrr_state_set) return MTRR_TYPE_INVALID; diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 851fbdb99767..b983d3dc4e6c 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -2001,7 +2001,6 @@ static int 
x86_pmu_event_init(struct perf_event *event) if (err) { if (event->destroy) event->destroy(event); - event->destroy = NULL; } if (ACCESS_ONCE(x86_pmu.attr_rdpmc)) diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c index ec0bfbab7265..97242a9242bd 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c +++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c @@ -80,12 +80,12 @@ static struct attribute_group amd_iommu_format_group = { * sysfs events attributes *---------------------------------------------*/ struct amd_iommu_event_desc { - struct device_attribute attr; + struct kobj_attribute attr; const char *event; }; -static ssize_t _iommu_event_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t _iommu_event_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { struct amd_iommu_event_desc *event = container_of(attr, struct amd_iommu_event_desc, attr); diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index a6d623e43d62..2c1910f6717e 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -573,7 +572,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params) struct crash_memmap_data cmd; struct crash_mem *cmem; - cmem = vzalloc(struct_size(cmem, ranges, 1)); + cmem = vzalloc(sizeof(struct crash_mem)); if (!cmem) return -ENOMEM; diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index 9a1489b92782..8fc842dae3b3 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c @@ -262,23 +262,15 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) return 0; } - if (!access_ok(VERIFY_READ, buf, size)) { - fpu__clear(fpu); + if (!access_ok(VERIFY_READ, buf, size)) return -EACCES; - } fpu__activate_curr(fpu); - if (!static_cpu_has(X86_FEATURE_FPU)) { - int ret = fpregs_soft_set(current, NULL, 
0, - sizeof(struct user_i387_ia32_struct), - NULL, buf); - - if (ret) - fpu__clear(fpu); - - return ret != 0; - } + if (!static_cpu_has(X86_FEATURE_FPU)) + return fpregs_soft_set(current, NULL, + 0, sizeof(struct user_i387_ia32_struct), + NULL, buf) != 0; if (use_xsave()) { struct _fpx_sw_bytes fx_sw_user; diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 1ff1adbc843b..3fa200ecca62 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -292,23 +292,6 @@ static void __init setup_xstate_comp(void) } } -/* - * All supported features have either init state all zeros or are - * handled in setup_init_fpu() individually. This is an explicit - * feature list and does not use XFEATURE_MASK*SUPPORTED to catch - * newly added supported features at build time and make people - * actually look at the init state for the new feature. - */ -#define XFEATURES_INIT_FPSTATE_HANDLED \ - (XFEATURE_MASK_FP | \ - XFEATURE_MASK_SSE | \ - XFEATURE_MASK_YMM | \ - XFEATURE_MASK_OPMASK | \ - XFEATURE_MASK_ZMM_Hi256 | \ - XFEATURE_MASK_Hi16_ZMM | \ - XFEATURE_MASK_BNDREGS | \ - XFEATURE_MASK_BNDCSR) - /* * setup the xstate image representing the init state */ @@ -316,8 +299,6 @@ static void __init setup_init_fpu_buf(void) { static int on_boot_cpu = 1; - BUILD_BUG_ON(XCNTXT_MASK != XFEATURES_INIT_FPSTATE_HANDLED); - WARN_ON_FPU(!on_boot_cpu); on_boot_cpu = 0; @@ -338,22 +319,10 @@ static void __init setup_init_fpu_buf(void) copy_kernel_to_xregs_booting(&init_fpstate.xsave); /* - * All components are now in init state. Read the state back so - * that init_fpstate contains all non-zero init state. This only - * works with XSAVE, but not with XSAVEOPT and XSAVES because - * those use the init optimization which skips writing data for - * components in init state. - * - * XSAVE could be used, but that would require to reshuffle the - * data when XSAVES is available because XSAVES uses xstate - * compaction. 
But doing so is a pointless exercise because most - * components have an all zeros init state except for the legacy - * ones (FP and SSE). Those can be saved with FXSAVE into the - * legacy area. Adding new features requires to ensure that init - * state is all zeroes or if not to add the necessary handling - * here. + * Dump the init state again. This is to identify the init state + * of any feature which is not represented by all zero's. */ - fxsave(&init_fpstate.fxsave); + copy_xregs_to_kernel_booting(&init_fpstate.xsave); } static int xfeature_is_supervisor(int xfeature_nr) diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 2a53a63f1e70..9f669fdd2010 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -283,10 +283,8 @@ void kvm_set_posted_intr_wakeup_handler(void (*handler)(void)) { if (handler) kvm_posted_intr_wakeup_handler = handler; - else { + else kvm_posted_intr_wakeup_handler = dummy_handler; - synchronize_rcu(); - } } EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler); diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 05ab9c8fd7a2..76eecc968565 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -1015,11 +1015,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) * So clear it by resetting the current kprobe: */ regs->flags &= ~X86_EFLAGS_TF; - /* - * Since the single step (trap) has been cancelled, - * we need to restore BTF here. 
- */ - restore_btf(); /* * If the TF flag was set before the kprobe hit, diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 6f0d340594ca..94779f66bf49 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -124,7 +124,6 @@ int apply_relocate(Elf32_Shdr *sechdrs, *location += sym->st_value; break; case R_386_PC32: - case R_386_PLT32: /* Add the value, subtract its position */ *location += sym->st_value - (uint32_t)location; break; diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 143c06f84596..877e3cb6edfb 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -337,11 +337,10 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { }, { /* Handle problems with rebooting on the OptiPlex 990. */ .callback = set_pci_reboot, - .ident = "Dell OptiPlex 990 BIOS A0x", + .ident = "Dell OptiPlex 990", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"), - DMI_MATCH(DMI_BIOS_VERSION, "A0"), }, }, { /* Handle problems with rebooting on Dell 300's */ @@ -419,15 +418,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { }, }, - { /* PCIe Wifi card isn't detected after reboot otherwise */ - .callback = set_pci_reboot, - .ident = "Zotac ZBOX CI327 nano", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "NA"), - DMI_MATCH(DMI_PRODUCT_NAME, "ZBOX-CI327NANO-GS-01"), - }, - }, - /* Sony */ { /* Handle problems with rebooting on Sony VGN-Z540N */ .callback = set_bios_reboot, @@ -489,20 +479,29 @@ static void emergency_vmx_disable_all(void) local_irq_disable(); /* - * Disable VMX on all CPUs before rebooting, otherwise we risk hanging - * the machine, because the CPU blocks INIT when it's in VMX root. + * We need to disable VMX on all CPUs before rebooting, otherwise + * we risk hanging up the machine, because the CPU ignore INIT + * signals when VMX is enabled. 
+ * + * We can't take any locks and we may be on an inconsistent + * state, so we use NMIs as IPIs to tell the other CPUs to disable + * VMX and halt. * - * We can't take any locks and we may be on an inconsistent state, so - * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt. + * For safety, we will avoid running the nmi_shootdown_cpus() + * stuff unnecessarily, but we don't have a way to check + * if other CPUs have VMX enabled. So we will call it only if the + * CPU we are running on has VMX enabled. * - * Do the NMI shootdown even if VMX if off on _this_ CPU, as that - * doesn't prevent a different CPU from being in VMX root operation. + * We will miss cases where VMX is not enabled on all CPUs. This + * shouldn't do much harm because KVM always enable VMX on all + * CPUs anyway. But we can miss it on the small window where KVM + * is still enabling VMX. */ - if (cpu_has_vmx()) { - /* Safely force _this_ CPU out of VMX root operation. */ - __cpu_emergency_vmxoff(); + if (cpu_has_vmx() && cpu_vmx_enabled()) { + /* Disable VMX on this CPU. */ + cpu_vmxoff(); - /* Halt and exit VMX root operation on the other CPUs. 
*/ + /* Halt and disable VMX on the other CPUs */ nmi_shootdown_cpus(vmxoff_nmi); } diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 60ccfa4c2768..8c38784cf992 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -268,11 +268,10 @@ static volatile u32 good_2byte_insns[256 / 32] = { static bool is_prefix_bad(struct insn *insn) { - insn_byte_t p; int i; - for_each_insn_prefix(insn, i, p) { - switch (p) { + for (i = 0; i < insn->prefixes.nbytes; i++) { + switch (insn->prefixes.bytes[i]) { case 0x26: /* INAT_PFX_ES */ case 0x2E: /* INAT_PFX_CS */ case 0x36: /* INAT_PFX_DS */ @@ -712,7 +711,6 @@ static struct uprobe_xol_ops branch_xol_ops = { static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) { u8 opc1 = OPCODE1(insn); - insn_byte_t p; int i; switch (opc1) { @@ -743,8 +741,8 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix. * No one uses these insns, reject any branch insns with such prefix. */ - for_each_insn_prefix(insn, i, p) { - if (p == 0x66) + for (i = 0; i < insn->prefixes.nbytes; i++) { + if (insn->prefixes.bytes[i] == 0x66) return -ENOTSUPP; } diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index b60ffd1b3ae2..13bda3fcf42b 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -611,14 +611,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U); unsigned phys_as = entry->eax & 0xff; - /* - * Use bare metal's MAXPHADDR if the CPU doesn't report guest - * MAXPHYADDR separately, or if TDP (NPT) is disabled, as the - * guest version "applies only to guests using nested paging". 
- */ - if (!g_phys_as || !tdp_enabled) + if (!g_phys_as) g_phys_as = phys_as; - entry->eax = g_phys_as | (virt_as << 8); entry->edx = 0; /* diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 845b88eb734b..f21d4df282fa 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -164,7 +164,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) } if (type == PERF_TYPE_RAW) - config = eventsel & AMD64_RAW_EVENT_MASK; + config = eventsel & X86_RAW_EVENT_MASK; pmc_reprogram_counter(pmc, type, config, !(eventsel & ARCH_PERFMON_EVENTSEL_USR), diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c index 04890ac518d0..822829f00590 100644 --- a/arch/x86/kvm/pmu_intel.c +++ b/arch/x86/kvm/pmu_intel.c @@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = { [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES }, [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, - [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES }, + [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES }, }; /* mapping between fixed pmc index and intel_arch_events array */ diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 77bee73faebc..6938a62a3df4 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -2564,11 +2564,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) svm->nested.intercept = nested_vmcb->control.intercept; svm_flush_tlb(&svm->vcpu); - svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl & - (V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK); - - svm->vmcb->control.int_ctl |= V_INTR_MASKING_MASK; - + svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK) svm->vcpu.arch.hflags |= HF_VINTR_MASK; else @@ -2931,7 +2927,7 @@ static int cr_interception(struct vcpu_svm *svm) err = 0; if (cr >= 16) { /* mov to cr */ cr -= 16; - val = kvm_register_readl(&svm->vcpu, reg); + val = kvm_register_read(&svm->vcpu, 
reg); switch (cr) { case 0: if (!check_selective_cr0_intercepted(svm, val)) @@ -2976,7 +2972,7 @@ static int cr_interception(struct vcpu_svm *svm) kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; } - kvm_register_writel(&svm->vcpu, reg, val); + kvm_register_write(&svm->vcpu, reg, val); } kvm_complete_insn_gp(&svm->vcpu, err); @@ -3008,13 +3004,13 @@ static int dr_interception(struct vcpu_svm *svm) if (dr >= 16) { /* mov to DRn */ if (!kvm_require_dr(&svm->vcpu, dr - 16)) return 1; - val = kvm_register_readl(&svm->vcpu, reg); + val = kvm_register_read(&svm->vcpu, reg); kvm_set_dr(&svm->vcpu, dr - 16, val); } else { if (!kvm_require_dr(&svm->vcpu, dr)) return 1; kvm_get_dr(&svm->vcpu, dr, &val); - kvm_register_writel(&svm->vcpu, reg, val); + kvm_register_write(&svm->vcpu, reg, val); } skip_emulated_instruction(&svm->vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 910100257df9..ef920da07518 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2172,10 +2172,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (!msr_info->host_initiated) { s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; adjust_tsc_offset_guest(vcpu, adj); - /* Before back to guest, tsc_timestamp must be adjusted - * as well, otherwise guest's percpu pvclock time could jump. 
- */ - kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); } vcpu->arch.ia32_tsc_adjust_msr = data; } @@ -4417,13 +4413,6 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v if (!system && kvm_x86_ops->get_cpl(vcpu) == 3) access |= PFERR_USER_MASK; - return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, - access, exception); -} - -int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, - unsigned int bytes, struct x86_exception *exception) -{ /* * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED * is returned, but our callers are not ready for that and they blindly @@ -4431,6 +4420,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, * uninitialized kernel stack memory into cr2 and error code. */ memset(exception, 0, sizeof(*exception)); + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, + access, exception); +} + +int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, + unsigned int bytes, struct x86_exception *exception) +{ return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, PFERR_WRITE_MASK, exception); } @@ -6020,7 +6016,6 @@ void kvm_arch_exit(void) unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block); #ifdef CONFIG_X86_64 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); - cancel_work_sync(&pvclock_gtod_work); #endif kvm_x86_ops = NULL; kvm_mmu_module_exit(); @@ -6728,8 +6723,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) set_debugreg(vcpu->arch.eff_db[3], 3); set_debugreg(vcpu->arch.dr6, 6); vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; - } else if (unlikely(hw_breakpoint_active())) { - set_debugreg(0, 7); } kvm_x86_ops->run(vcpu); diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c index 8a3bc242c5e9..518532e6a3fa 100644 --- a/arch/x86/lib/msr-smp.c +++ b/arch/x86/lib/msr-smp.c @@ -239,7 +239,7 @@ static void __wrmsr_safe_regs_on_cpu(void *info) rv->err = 
wrmsr_safe_regs(rv->regs); } -int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) +int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) { int err; struct msr_regs_info rv; @@ -252,7 +252,7 @@ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) } EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu); -int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) +int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) { int err; struct msr_regs_info rv; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index a935039c20be..97b6b0164dcb 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1179,21 +1179,21 @@ int kern_addr_valid(unsigned long addr) return 0; pud = pud_offset(pgd, addr); - if (!pud_present(*pud)) + if (pud_none(*pud)) return 0; if (pud_large(*pud)) return pfn_valid(pud_pfn(*pud)); pmd = pmd_offset(pud, addr); - if (!pmd_present(*pmd)) + if (pmd_none(*pmd)) return 0; if (pmd_large(*pmd)) return pfn_valid(pmd_pfn(*pmd)); pte = pte_offset_kernel(pmd, addr); - if (!pte_present(*pte)) + if (pte_none(*pte)) return 0; return pfn_valid(pte_pfn(*pte)); diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 289518bb0e8d..3ed4753280aa 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -720,8 +720,6 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr) } free_page((unsigned long)pmd_sv); - - pgtable_pmd_page_dtor(virt_to_page(pmd)); free_page((unsigned long)pmd); return 1; diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 82f8cd0a3af9..bea13c35979e 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1038,16 +1038,7 @@ common_load: } if (image) { - /* - * When populating the image, assert that: - * - * i) We do not write beyond the allocated space, and - * ii) addrs[i] did not change from the prior run, in order - * to validate assumptions made for computing branch - * displacements. 
- */ - if (unlikely(proglen + ilen > oldproglen || - proglen + ilen != addrs[i])) { + if (unlikely(proglen + ilen > oldproglen)) { pr_err("bpf_jit_compile fatal error\n"); return -EFAULT; } diff --git a/arch/x86/tools/chkobjdump.awk b/arch/x86/tools/chkobjdump.awk index a4cf678cf5c8..fd1ab80be0de 100644 --- a/arch/x86/tools/chkobjdump.awk +++ b/arch/x86/tools/chkobjdump.awk @@ -10,7 +10,6 @@ BEGIN { /^GNU objdump/ { verstr = "" - gsub(/\(.*\)/, ""); for (i = 3; i <= NF; i++) if (match($(i), "^[0-9]")) { verstr = $(i); diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index d1c3f82c7882..5b6c8486a0be 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -839,11 +839,9 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, case R_386_PC32: case R_386_PC16: case R_386_PC8: - case R_386_PLT32: /* - * NONE can be ignored and PC relative relocations don't need - * to be adjusted. Because sym must be defined, R_386_PLT32 can - * be treated the same way as R_386_PC32. + * NONE can be ignored and PC relative relocations don't + * need to be adjusted. */ break; @@ -884,11 +882,9 @@ static int do_reloc_real(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, case R_386_PC32: case R_386_PC16: case R_386_PC8: - case R_386_PLT32: /* - * NONE can be ignored and PC relative relocations don't need - * to be adjusted. Because sym must be defined, R_386_PLT32 can - * be treated the same way as R_386_PC32. + * NONE can be ignored and PC relative relocations don't + * need to be adjusted. 
*/ break; diff --git a/arch/x86/um/syscalls_64.c b/arch/x86/um/syscalls_64.c index 40ecacb2c54b..e6552275320b 100644 --- a/arch/x86/um/syscalls_64.c +++ b/arch/x86/um/syscalls_64.c @@ -9,7 +9,6 @@ #include #include /* XXX This should get the constants from libc */ #include -#include long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) { @@ -33,7 +32,7 @@ long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) switch (code) { case ARCH_SET_FS: case ARCH_SET_GS: - ret = restore_pid_registers(pid, ¤t->thread.regs.regs); + ret = restore_registers(pid, ¤t->thread.regs.regs); if (ret) return ret; break; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 28725a6ed5de..79aff24eed65 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -861,8 +861,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g) preempt_enable(); } -static unsigned xen_convert_trap_info(const struct desc_ptr *desc, - struct trap_info *traps, bool full) +static void xen_convert_trap_info(const struct desc_ptr *desc, + struct trap_info *traps) { unsigned in, out, count; @@ -872,18 +872,17 @@ static unsigned xen_convert_trap_info(const struct desc_ptr *desc, for (in = out = 0; in < count; in++) { gate_desc *entry = (gate_desc*)(desc->address) + in; - if (cvt_gate_to_trap(in, entry, &traps[out]) || full) + if (cvt_gate_to_trap(in, entry, &traps[out])) out++; } - - return out; + traps[out].address = 0; } void xen_copy_trap_info(struct trap_info *traps) { const struct desc_ptr *desc = this_cpu_ptr(&idt_desc); - xen_convert_trap_info(desc, traps, true); + xen_convert_trap_info(desc, traps); } /* Load a new IDT into Xen. 
In principle this can be per-CPU, so we @@ -893,7 +892,6 @@ static void xen_load_idt(const struct desc_ptr *desc) { static DEFINE_SPINLOCK(lock); static struct trap_info traps[257]; - unsigned out; trace_xen_cpu_load_idt(desc); @@ -901,8 +899,7 @@ static void xen_load_idt(const struct desc_ptr *desc) memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc)); - out = xen_convert_trap_info(desc, traps, false); - memset(&traps[out], 0, sizeof(traps[0])); + xen_convert_trap_info(desc, traps); xen_mc_flush(); if (HYPERVISOR_set_trap_table(traps)) diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index af0ebe18248a..cab9f766bb06 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -623,8 +623,8 @@ int xen_alloc_p2m_entry(unsigned long pfn) } /* Expanded the p2m? */ - if (pfn >= xen_p2m_last_pfn) { - xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE); + if (pfn > xen_p2m_last_pfn) { + xen_p2m_last_pfn = pfn; HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; } @@ -723,12 +723,9 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, for (i = 0; i < count; i++) { unsigned long mfn, pfn; - struct gnttab_unmap_grant_ref unmap[2]; - int rc; /* Do not add to override if the map failed. */ - if (map_ops[i].status != GNTST_okay || - (kmap_ops && kmap_ops[i].status != GNTST_okay)) + if (map_ops[i].status) continue; if (map_ops[i].flags & GNTMAP_contains_pte) { @@ -742,46 +739,10 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned"); - if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) - continue; - - /* - * Signal an error for this slot. This in turn requires - * immediate unmapping. 
- */ - map_ops[i].status = GNTST_general_error; - unmap[0].host_addr = map_ops[i].host_addr, - unmap[0].handle = map_ops[i].handle; - map_ops[i].handle = ~0; - if (map_ops[i].flags & GNTMAP_device_map) - unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr; - else - unmap[0].dev_bus_addr = 0; - - if (kmap_ops) { - kmap_ops[i].status = GNTST_general_error; - unmap[1].host_addr = kmap_ops[i].host_addr, - unmap[1].handle = kmap_ops[i].handle; - kmap_ops[i].handle = ~0; - if (kmap_ops[i].flags & GNTMAP_device_map) - unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr; - else - unmap[1].dev_bus_addr = 0; + if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) { + ret = -ENOMEM; + goto out; } - - /* - * Pre-populate both status fields, to be recognizable in - * the log message below. - */ - unmap[0].status = 1; - unmap[1].status = 1; - - rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, - unmap, 1 + !!kmap_ops); - if (rc || unmap[0].status != GNTST_okay || - unmap[1].status != GNTST_okay) - pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n", - rc, unmap[0].status, unmap[1].status); } out: @@ -802,15 +763,17 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); unsigned long pfn = page_to_pfn(pages[i]); - if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT)) - set_phys_to_machine(pfn, INVALID_P2M_ENTRY); - else + if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) { ret = -EINVAL; + goto out; + } + + set_phys_to_machine(pfn, INVALID_P2M_ENTRY); } if (kunmap_ops) ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, - kunmap_ops, count) ?: ret; - + kunmap_ops, count); +out: return ret; } EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping); diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 8a0ebb171363..bdad9be4a729 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -18,7 +18,7 @@ config XTENSA select HAVE_DMA_ATTRS select HAVE_EXIT_THREAD select 
HAVE_FUNCTION_TRACER - select HAVE_FUTEX_CMPXCHG if !MMU && FUTEX + select HAVE_FUTEX_CMPXCHG if !MMU select HAVE_IRQ_TIME_ACCOUNTING select HAVE_OPROFILE select HAVE_PERF_EVENTS diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index fbbc24b914e3..441694464b1e 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -144,7 +144,7 @@ unsigned xtensa_get_ext_irq_no(unsigned irq) void __init init_IRQ(void) { -#ifdef CONFIG_USE_OF +#ifdef CONFIG_OF irqchip_init(); #else #ifdef CONFIG_HAVE_SMP diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c index 5d264ae517f5..92d785fefb6d 100644 --- a/arch/xtensa/platforms/iss/console.c +++ b/arch/xtensa/platforms/iss/console.c @@ -186,13 +186,9 @@ static const struct tty_operations serial_ops = { int __init rs_init(void) { - int ret; + tty_port_init(&serial_port); serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES); - if (!serial_driver) - return -ENOMEM; - - tty_port_init(&serial_port); printk ("%s %s\n", serial_name, serial_version); @@ -212,15 +208,8 @@ int __init rs_init(void) tty_set_operations(serial_driver, &serial_ops); tty_port_link_device(&serial_port, serial_driver, 0); - ret = tty_register_driver(serial_driver); - if (ret) { - pr_err("Couldn't register serial driver\n"); - tty_driver_kref_put(serial_driver); - tty_port_destroy(&serial_port); - - return ret; - } - + if (tty_register_driver(serial_driver)) + panic("Couldn't register serial driver\n"); return 0; } diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c index af7aca70a861..3c3ace2c46b6 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c @@ -21,6 +21,7 @@ #include #define SIMDISK_MAJOR 240 +#define SECTOR_SHIFT 9 #define SIMDISK_MINORS 1 #define MAX_SIMDISK_COUNT 10 diff --git a/block/blk-settings.c b/block/blk-settings.c index 8952a8f3f00a..7db6c45dc02c 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ 
-495,14 +495,6 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) } EXPORT_SYMBOL(blk_queue_stack_limits); -static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs) -{ - sectors = round_down(sectors, lbs >> SECTOR_SHIFT); - if (sectors < PAGE_SIZE >> SECTOR_SHIFT) - sectors = PAGE_SIZE >> SECTOR_SHIFT; - return sectors; -} - /** * blk_stack_limits - adjust queue_limits for stacked devices * @t: the stacking driver limits (top device) @@ -615,10 +607,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, ret = -1; } - t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size); - t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size); - t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size); - /* Discard alignment and granularity */ if (b->discard_granularity) { alignment = queue_limit_discard_alignment(b, start); diff --git a/block/blk-throttle.c b/block/blk-throttle.c index fbd08c4569ce..17bdd6b55beb 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1588,7 +1588,6 @@ int blk_throtl_init(struct request_queue *q) void blk_throtl_exit(struct request_queue *q) { BUG_ON(!q->td); - del_timer_sync(&q->td->service_queue.pending_timer); throtl_shutdown_wq(q); blkcg_deactivate_policy(q, &blkcg_policy_throtl); kfree(q->td); diff --git a/block/genhd.c b/block/genhd.c index 94ef33953a13..53a931b30d78 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -158,17 +158,14 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) part = rcu_dereference(ptbl->part[piter->idx]); if (!part) continue; - get_device(part_to_dev(part)); - piter->part = part; if (!part_nr_sects_read(part) && !(piter->flags & DISK_PITER_INCL_EMPTY) && !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && - piter->idx == 0)) { - put_device(part_to_dev(part)); - piter->part = NULL; + piter->idx == 0)) continue; - } + 
get_device(part_to_dev(part)); + piter->part = part; piter->idx += inc; break; } diff --git a/block/ioprio.c b/block/ioprio.c index 284bdfa3aacf..01b8116298a1 100644 --- a/block/ioprio.c +++ b/block/ioprio.c @@ -202,7 +202,6 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) pgrp = task_pgrp(current); else pgrp = find_vpid(who); - read_lock(&tasklist_lock); do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { tmpio = get_task_ioprio(p); if (tmpio < 0) @@ -212,8 +211,6 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who) else ret = ioprio_best(ret, tmpio); } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); - read_unlock(&tasklist_lock); - break; case IOPRIO_WHO_USER: uid = make_kuid(current_user_ns(), who); diff --git a/certs/Makefile b/certs/Makefile index f82be3690250..9d89b992eee6 100644 --- a/certs/Makefile +++ b/certs/Makefile @@ -43,19 +43,11 @@ endif redirect_openssl = 2>&1 quiet_redirect_openssl = 2>&1 silent_redirect_openssl = 2>/dev/null -openssl_available = $(shell openssl help 2>/dev/null && echo yes) # We do it this way rather than having a boolean option for enabling an # external private key, because 'make randconfig' might enable such a # boolean option and we unfortunately can't make it depend on !RANDCONFIG. ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem") - -ifeq ($(openssl_available),yes) -X509TEXT=$(shell openssl x509 -in "certs/signing_key.pem" -text 2>/dev/null) - -$(if $(findstring rsaEncryption,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem")) -endif - $(obj)/signing_key.pem: $(obj)/x509.genkey @$(kecho) "###" @$(kecho) "### Now generating an X.509 key pair to be used for signing modules." 
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 62e11835f220..85082574c515 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -138,14 +138,12 @@ static void pcrypt_aead_enc(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); - int ret; - ret = crypto_aead_encrypt(req); + padata->info = crypto_aead_encrypt(req); - if (ret == -EINPROGRESS) + if (padata->info == -EINPROGRESS) return; - padata->info = ret; padata_do_serial(padata); } @@ -182,14 +180,12 @@ static void pcrypt_aead_dec(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); - int ret; - ret = crypto_aead_decrypt(req); + padata->info = crypto_aead_decrypt(req); - if (ret == -EINPROGRESS) + if (padata->info == -EINPROGRESS) return; - padata->info = ret; padata_do_serial(padata); } diff --git a/crypto/shash.c b/crypto/shash.c index 4490269eafc3..b086b0174b82 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -24,24 +24,12 @@ static const struct crypto_type crypto_shash_type; -static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, - unsigned int keylen) +int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) { return -ENOSYS; } - -/* - * Check whether an shash algorithm has a setkey function. - * - * For CFI compatibility, this must not be an inline function. This is because - * when CFI is enabled, modules won't get the same address for shash_no_setkey - * (if it were exported, which inlining would require) as the core kernel will. 
- */ -bool crypto_shash_alg_has_setkey(struct shash_alg *alg) -{ - return alg->setkey != shash_no_setkey; -} -EXPORT_SYMBOL_GPL(crypto_shash_alg_has_setkey); +EXPORT_SYMBOL_GPL(shash_no_setkey); static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c index 1738eb0fa7db..48fc3ad13a4b 100644 --- a/drivers/acpi/acpi_pnp.c +++ b/drivers/acpi/acpi_pnp.c @@ -320,9 +320,6 @@ static bool matching_id(const char *idstr, const char *list_id) { int i; - if (strlen(idstr) != strlen(list_id)) - return false; - if (memcmp(idstr, list_id, 3)) return false; diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index f178d11597c0..faa97604d878 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -256,8 +256,6 @@ extern struct acpi_bit_register_info ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a); ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b); -ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a_s0); -ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b_s0); /***************************************************************************** * diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index c20b7dfec7b2..77930683ab7d 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c @@ -1016,8 +1016,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) (walk_state, return_desc, &temp_desc); if (ACPI_FAILURE(status)) { - return_ACPI_STATUS - (status); + goto cleanup; } return_desc = temp_desc; diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c index e4998cc0ce28..e5599f610808 100644 --- a/drivers/acpi/acpica/hwesleep.c +++ b/drivers/acpi/acpica/hwesleep.c @@ -184,13 +184,17 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state) acpi_status acpi_hw_extended_wake_prep(u8 sleep_state) { + acpi_status status; u8 sleep_type_value; ACPI_FUNCTION_TRACE(hw_extended_wake_prep); - if 
(acpi_gbl_sleep_type_a_s0 != ACPI_SLEEP_TYPE_INVALID) { + status = acpi_get_sleep_type_data(ACPI_STATE_S0, + &acpi_gbl_sleep_type_a, + &acpi_gbl_sleep_type_b); + if (ACPI_SUCCESS(status)) { sleep_type_value = - ((acpi_gbl_sleep_type_a_s0 << ACPI_X_SLEEP_TYPE_POSITION) & + ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) & ACPI_X_SLEEP_TYPE_MASK); (void)acpi_write((u64)(sleep_type_value | ACPI_X_SLEEP_ENABLE), diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index 7e44ba8c6a1a..7d21cae6d602 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -217,7 +217,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state) acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state) { - acpi_status status = AE_OK; + acpi_status status; struct acpi_bit_register_info *sleep_type_reg_info; struct acpi_bit_register_info *sleep_enable_reg_info; u32 pm1a_control; @@ -230,7 +230,10 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state) * This is unclear from the ACPI Spec, but it is required * by some machines. 
*/ - if (acpi_gbl_sleep_type_a_s0 != ACPI_SLEEP_TYPE_INVALID) { + status = acpi_get_sleep_type_data(ACPI_STATE_S0, + &acpi_gbl_sleep_type_a, + &acpi_gbl_sleep_type_b); + if (ACPI_SUCCESS(status)) { sleep_type_reg_info = acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE); sleep_enable_reg_info = @@ -251,9 +254,9 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state) /* Insert the SLP_TYP bits */ - pm1a_control |= (acpi_gbl_sleep_type_a_s0 << + pm1a_control |= (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position); - pm1b_control |= (acpi_gbl_sleep_type_b_s0 << + pm1b_control |= (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position); /* Write the control registers and ignore any errors */ diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index b04e2b0f6224..d62a61612b3f 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -372,13 +372,6 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state) return_ACPI_STATUS(status); } - status = acpi_get_sleep_type_data(ACPI_STATE_S0, - &acpi_gbl_sleep_type_a_s0, - &acpi_gbl_sleep_type_b_s0); - if (ACPI_FAILURE(status)) { - acpi_gbl_sleep_type_a_s0 = ACPI_SLEEP_TYPE_INVALID; - } - /* Execute the _PTS method (Prepare To Sleep) */ arg_list.count = 1; diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index d90b2cf310fb..9f8b088e21d7 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -439,7 +439,6 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action) ACPI_WARNING((AE_INFO, "Obj %p, Reference Count is already zero, cannot decrement\n", object)); - return; } ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index e4f1cb67ba12..b719ab3090bb 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -187,7 +187,7 @@ static int acpi_battery_is_charged(struct acpi_battery *battery) return 1; /* fallback to using design 
values for broken batteries */ - if (battery->design_capacity <= battery->capacity_now) + if (battery->design_capacity == battery->capacity_now) return 1; /* we don't do any sort of metric based on percentages */ diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index d016eba51a95..521d1b28760c 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1087,7 +1087,6 @@ static int __init acpi_init(void) init_acpi_device_notify(); result = acpi_bus_init(); if (result) { - kobject_put(acpi_kobj); disable_acpi(); return result; } diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c index ea4c7c93a920..435bd0ffc8c0 100644 --- a/drivers/acpi/custom_method.c +++ b/drivers/acpi/custom_method.c @@ -37,8 +37,6 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, sizeof(struct acpi_table_header))) return -EFAULT; uncopied_bytes = max_size = table.length; - /* make sure the buf is not allocated */ - kfree(buf); buf = kzalloc(max_size, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -52,7 +50,6 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, (*ppos + count < count) || (count > uncopied_bytes)) { kfree(buf); - buf = NULL; return -EINVAL; } @@ -74,6 +71,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE); } + kfree(buf); return count; } diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index c201aaf287dc..a899a7abcf63 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -259,12 +259,20 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev, if (add_uevent_var(env, "MODALIAS=")) return -ENOMEM; - if (adev->data.of_compatible) - len = create_of_modalias(adev, &env->buf[env->buflen - 1], - sizeof(env->buf) - env->buflen); - else - len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], - sizeof(env->buf) - env->buflen); + len = create_pnp_modalias(adev, 
&env->buf[env->buflen - 1], + sizeof(env->buf) - env->buflen); + if (len < 0) + return len; + + env->buflen += len; + if (!adev->data.of_compatible) + return 0; + + if (len > 0 && add_uevent_var(env, "MODALIAS=")) + return -ENOMEM; + + len = create_of_modalias(adev, &env->buf[env->buflen - 1], + sizeof(env->buf) - env->buflen); if (len < 0) return len; @@ -450,7 +458,7 @@ static ssize_t description_show(struct device *dev, (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer, acpi_dev->pnp.str_obj->buffer.length, UTF16_LITTLE_ENDIAN, buf, - PAGE_SIZE - 1); + PAGE_SIZE); buf[result++] = '\n'; diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 69fec2d3a1f5..175c86bee3a9 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -28,7 +28,6 @@ #include #include #include /* need_resched() */ -#include #include #include #include @@ -573,37 +572,10 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, return; } -static int acpi_cst_latency_cmp(const void *a, const void *b) -{ - const struct acpi_processor_cx *x = a, *y = b; - - if (!(x->valid && y->valid)) - return 0; - if (x->latency > y->latency) - return 1; - if (x->latency < y->latency) - return -1; - return 0; -} -static void acpi_cst_latency_swap(void *a, void *b, int n) -{ - struct acpi_processor_cx *x = a, *y = b; - u32 tmp; - - if (!(x->valid && y->valid)) - return; - tmp = x->latency; - x->latency = y->latency; - y->latency = tmp; -} - static int acpi_processor_power_verify(struct acpi_processor *pr) { unsigned int i; unsigned int working = 0; - unsigned int last_latency = 0; - unsigned int last_type = 0; - bool buggy_latency = false; pr->power.timer_broadcast_on_state = INT_MAX; @@ -627,24 +599,12 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) } if (!cx->valid) continue; - if (cx->type >= last_type && cx->latency < last_latency) - buggy_latency = true; - last_latency = cx->latency; - last_type = cx->type; 
lapic_timer_check_state(i, pr, cx); tsc_check_state(cx->type); working++; } - if (buggy_latency) { - pr_notice("FW issue: working around C-state latencies out of order\n"); - sort(&pr->power.states[1], max_cstate, - sizeof(struct acpi_processor_cx), - acpi_cst_latency_cmp, - acpi_cst_latency_swap); - } - lapic_timer_propagate_broadcast(pr); return (working); diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index e6003d2baa45..627f8fbb5e9a 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -506,7 +506,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, ret = c->preproc(ares, c->preproc_data); if (ret < 0) { c->error = ret; - return AE_ABORT_METHOD; + return AE_CTRL_TERMINATE; } else if (ret > 0) { return AE_OK; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 90ed17aacaa7..2ab4568aaddd 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -564,8 +564,6 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device, if (!device) return -EINVAL; - *device = NULL; - status = acpi_get_data_full(handle, acpi_scan_drop_device, (void **)device, callback); if (ACPI_FAILURE(status) || !*device) { diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index b4826335ad0b..82707f9824ca 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -188,8 +188,6 @@ struct acpi_thermal { int tz_enabled; int kelvin_offset; struct work_struct thermal_check_work; - struct mutex thermal_check_lock; - atomic_t thermal_check_count; }; /* -------------------------------------------------------------------------- @@ -515,6 +513,16 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz) return 0; } +static void acpi_thermal_check(void *data) +{ + struct acpi_thermal *tz = data; + + if (!tz->tz_enabled) + return; + + thermal_zone_device_update(tz->thermal_zone); +} + /* sys I/F for generic thermal sysfs support */ static int thermal_get_temp(struct thermal_zone_device 
*thermal, int *temp) @@ -548,8 +556,6 @@ static int thermal_get_mode(struct thermal_zone_device *thermal, return 0; } -static void acpi_thermal_check_fn(struct work_struct *work); - static int thermal_set_mode(struct thermal_zone_device *thermal, enum thermal_device_mode mode) { @@ -575,7 +581,7 @@ static int thermal_set_mode(struct thermal_zone_device *thermal, ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s kernel ACPI thermal control\n", tz->tz_enabled ? "Enable" : "Disable")); - acpi_thermal_check_fn(&tz->thermal_check_work); + acpi_thermal_check(tz); } return 0; } @@ -944,12 +950,6 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz) Driver Interface -------------------------------------------------------------------------- */ -static void acpi_queue_thermal_check(struct acpi_thermal *tz) -{ - if (!work_pending(&tz->thermal_check_work)) - queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); -} - static void acpi_thermal_notify(struct acpi_device *device, u32 event) { struct acpi_thermal *tz = acpi_driver_data(device); @@ -960,17 +960,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event) switch (event) { case ACPI_THERMAL_NOTIFY_TEMPERATURE: - acpi_queue_thermal_check(tz); + acpi_thermal_check(tz); break; case ACPI_THERMAL_NOTIFY_THRESHOLDS: acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS); - acpi_queue_thermal_check(tz); + acpi_thermal_check(tz); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); break; case ACPI_THERMAL_NOTIFY_DEVICES: acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES); - acpi_queue_thermal_check(tz); + acpi_thermal_check(tz); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); break; @@ -1070,27 +1070,7 @@ static void acpi_thermal_check_fn(struct work_struct *work) { struct acpi_thermal *tz = container_of(work, struct acpi_thermal, thermal_check_work); - - if (!tz->tz_enabled) - return; - /* - * In 
general, it is not sufficient to check the pending bit, because - * subsequent instances of this function may be queued after one of them - * has started running (e.g. if _TMP sleeps). Avoid bailing out if just - * one of them is running, though, because it may have done the actual - * check some time ago, so allow at least one of them to block on the - * mutex while another one is running the update. - */ - if (!atomic_add_unless(&tz->thermal_check_count, -1, 1)) - return; - - mutex_lock(&tz->thermal_check_lock); - - thermal_zone_device_update(tz->thermal_zone); - - atomic_inc(&tz->thermal_check_count); - - mutex_unlock(&tz->thermal_check_lock); + acpi_thermal_check(tz); } static int acpi_thermal_add(struct acpi_device *device) @@ -1122,8 +1102,6 @@ static int acpi_thermal_add(struct acpi_device *device) if (result) goto free_memory; - atomic_set(&tz->thermal_check_count, 3); - mutex_init(&tz->thermal_check_lock); INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn); pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device), @@ -1189,7 +1167,7 @@ static int acpi_thermal_resume(struct device *dev) tz->state.active |= tz->trips.active[i].flags.enabled; } - acpi_queue_thermal_check(tz); + queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); return AE_OK; } diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index f8e19ac8e328..1accc01fb0ca 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -275,11 +275,10 @@ static int amba_remove(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); struct amba_driver *drv = to_amba_driver(dev->driver); - int ret = 0; + int ret; pm_runtime_get_sync(dev); - if (drv->remove) - ret = drv->remove(pcdev); + ret = drv->remove(pcdev); pm_runtime_put_noidle(dev); /* Undo the runtime PM settings in amba_probe() */ @@ -296,9 +295,7 @@ static int amba_remove(struct device *dev) static void amba_shutdown(struct device *dev) { struct amba_driver *drv = to_amba_driver(dev->driver); - - if (drv->shutdown) 
- drv->shutdown(to_amba_device(dev)); + drv->shutdown(to_amba_device(dev)); } /** @@ -311,13 +308,12 @@ static void amba_shutdown(struct device *dev) */ int amba_driver_register(struct amba_driver *drv) { - if (!drv->probe) - return -EINVAL; - drv->drv.bus = &amba_bustype; - drv->drv.probe = amba_probe; - drv->drv.remove = amba_remove; - drv->drv.shutdown = amba_shutdown; + +#define SETFN(fn) if (drv->fn) drv->drv.fn = amba_##fn + SETFN(probe); + SETFN(remove); + SETFN(shutdown); return driver_register(&drv->drv); } @@ -360,6 +356,9 @@ int amba_device_add(struct amba_device *dev, struct resource *parent) void __iomem *tmp; int i, ret; + WARN_ON(dev->irq[0] == (unsigned int)-1); + WARN_ON(dev->irq[1] == (unsigned int)-1); + ret = request_resource(parent, &dev->res); if (ret) goto err_out; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 08e75d72ddff..ef8f9dc41a55 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -552,7 +552,6 @@ struct binder_proc { struct task_struct *tsk; struct files_struct *files; struct mutex files_lock; - const struct cred *cred; struct hlist_node deferred_work_node; int deferred_work; bool is_dead; @@ -2504,7 +2503,7 @@ static int binder_translate_binder(struct flat_binder_object *fp, ret = -EINVAL; goto done; } - if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { + if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { ret = -EPERM; goto done; } @@ -2550,7 +2549,7 @@ static int binder_translate_handle(struct flat_binder_object *fp, proc->pid, thread->pid, fp->handle); return -EINVAL; } - if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { + if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { ret = -EPERM; goto done; } @@ -2634,7 +2633,7 @@ static int binder_translate_fd(int fd, ret = -EBADF; goto err_fget; } - ret = security_binder_transfer_file(proc->cred, target_proc->cred, file); + ret = security_binder_transfer_file(proc->tsk, 
target_proc->tsk, file); if (ret < 0) { ret = -EPERM; goto err_security; @@ -3025,8 +3024,8 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_invalid_target_handle; } - if (security_binder_transaction(proc->cred, - target_proc->cred) < 0) { + if (security_binder_transaction(proc->tsk, + target_proc->tsk) < 0) { return_error = BR_FAILED_REPLY; return_error_param = -EPERM; return_error_line = __LINE__; @@ -4162,7 +4161,7 @@ retry: e->cmd = BR_OK; ptr += sizeof(uint32_t); - binder_stat_br(proc, thread, e->cmd); + binder_stat_br(proc, thread, cmd); } break; case BINDER_WORK_TRANSACTION_COMPLETE: { binder_inner_proc_unlock(proc); @@ -4556,7 +4555,6 @@ static void binder_free_proc(struct binder_proc *proc) BUG_ON(!list_empty(&proc->delivered_death)); binder_alloc_deferred_release(&proc->alloc); put_task_struct(proc->tsk); - put_cred(proc->cred); binder_stats_deleted(BINDER_STAT_PROC); kfree(proc); } @@ -4628,20 +4626,23 @@ static int binder_thread_release(struct binder_proc *proc, } /* - * If this thread used poll, make sure we remove the waitqueue from any - * poll data structures holding it. + * If this thread used poll, make sure we remove the waitqueue + * from any epoll data structures holding it with POLLFREE. + * waitqueue_active() is safe to use here because we're holding + * the inner lock. */ - if (thread->looper & BINDER_LOOPER_STATE_POLL) - wake_up_pollfree(&thread->wait); + if ((thread->looper & BINDER_LOOPER_STATE_POLL) && + waitqueue_active(&thread->wait)) { + wake_up_poll(&thread->wait, POLLHUP | POLLFREE); + } binder_inner_proc_unlock(thread->proc); /* - * This is needed to avoid races between wake_up_pollfree() above and - * someone else removing the last entry from the queue for other reasons - * (e.g. ep_remove_wait_queue() being called due to an epoll file - * descriptor being closed). Such other users hold an RCU read lock, so - * we can be sure they're done after we call synchronize_rcu(). 
+ * This is needed to avoid races between wake_up_poll() above and + * and ep_remove_waitqueue() called for other reasons (eg the epoll file + * descriptor being closed); ep_remove_waitqueue() holds an RCU read + * lock, so we can be sure it's done after calling synchronize_rcu(). */ if (thread->looper & BINDER_LOOPER_STATE_POLL) synchronize_rcu(); @@ -4759,7 +4760,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp, ret = -EBUSY; goto out; } - ret = security_binder_set_context_mgr(proc->cred); + ret = security_binder_set_context_mgr(proc->tsk); if (ret < 0) goto out; if (uid_valid(context->binder_context_mgr_uid)) { @@ -5080,7 +5081,6 @@ static int binder_open(struct inode *nodp, struct file *filp) atomic_set(&proc->tmp_ref, 0); get_task_struct(current->group_leader); proc->tsk = current->group_leader; - proc->cred = get_cred(filp->f_cred); mutex_init(&proc->files_lock); INIT_LIST_HEAD(&proc->todo); if (binder_supported_policy(current->policy)) { diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c index 98b4f0d898d6..b26437430163 100644 --- a/drivers/ata/ahci_sunxi.c +++ b/drivers/ata/ahci_sunxi.c @@ -165,7 +165,7 @@ static void ahci_sunxi_start_engine(struct ata_port *ap) } static const struct ata_port_info ahci_sunxi_port_info = { - .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ | ATA_FLAG_NO_DIPM, + .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_platform_ops, diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 8839ad6b73e3..65371e1befe8 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -516,13 +516,11 @@ int ahci_platform_init_host(struct platform_device *pdev, int i, irq, n_ports, rc; irq = platform_get_irq(pdev, 0); - if (irq < 0) { + if (irq <= 0) { if (irq != -EPROBE_DEFER) dev_err(dev, "no irq\n"); return irq; } - if (!irq) - return -EINVAL; hpriv->irq = irq; diff --git a/drivers/ata/libata-core.c 
b/drivers/ata/libata-core.c index a4b589d03642..8ed3f6d75ff1 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2077,25 +2077,6 @@ static inline u8 ata_dev_knobble(struct ata_device *dev) return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); } -static bool ata_dev_check_adapter(struct ata_device *dev, - unsigned short vendor_id) -{ - struct pci_dev *pcidev = NULL; - struct device *parent_dev = NULL; - - for (parent_dev = dev->tdev.parent; parent_dev != NULL; - parent_dev = parent_dev->parent) { - if (dev_is_pci(parent_dev)) { - pcidev = to_pci_dev(parent_dev); - if (pcidev->vendor == vendor_id) - return true; - break; - } - } - - return false; -} - static int ata_dev_config_ncq(struct ata_device *dev, char *desc, size_t desc_sz) { @@ -2112,13 +2093,6 @@ static int ata_dev_config_ncq(struct ata_device *dev, snprintf(desc, desc_sz, "NCQ (not used)"); return 0; } - - if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI && - ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) { - snprintf(desc, desc_sz, "NCQ (not used)"); - return 0; - } - if (ap->flags & ATA_FLAG_NCQ) { hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); dev->flags |= ATA_DFLAG_NCQ; @@ -4153,8 +4127,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, /* Odd clown on sil3726/4726 PMPs */ { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, - /* Similar story with ASMedia 1092 */ - { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE }, /* Weird ATAPI devices */ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, @@ -4297,18 +4269,11 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, - { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - ATA_HORKAGE_ZERO_AFTER_TRIM | - ATA_HORKAGE_NO_NCQ_ON_ATI, }, - { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | - 
ATA_HORKAGE_ZERO_AFTER_TRIM | - ATA_HORKAGE_NO_NCQ_ON_ATI, }, { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, /* devices that don't properly handle TRIM commands */ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, - { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, }, /* * As defined, the DRAT (Deterministic Read After Trim) and RZAT @@ -6061,7 +6026,7 @@ int ata_host_start(struct ata_host *host) have_stop = 1; } - if (host->ops && host->ops->host_stop) + if (host->ops->host_stop) have_stop = 1; if (have_stop) { @@ -6551,8 +6516,6 @@ static int __init ata_parse_force_one(char **cur, { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM }, { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM }, - { "noncqati", .horkage_on = ATA_HORKAGE_NO_NCQ_ON_ATI }, - { "ncqati", .horkage_off = ATA_HORKAGE_NO_NCQ_ON_ATI }, { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 23c8dd7dc977..7db76b5c7ada 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -114,12 +114,6 @@ static const unsigned long ata_eh_identify_timeouts[] = { ULONG_MAX, }; -static const unsigned long ata_eh_revalidate_timeouts[] = { - 15000, /* Some drives are slow to read log pages when waking-up */ - 15000, /* combined time till here is enough even for media access */ - ULONG_MAX, -}; - static const unsigned long ata_eh_flush_timeouts[] = { 15000, /* be generous with flush */ 15000, /* ditto */ @@ -156,8 +150,6 @@ static const struct ata_eh_cmd_timeout_ent ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI), .timeouts = ata_eh_identify_timeouts, }, - { .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT), - .timeouts = ata_eh_revalidate_timeouts, }, { .commands = 
CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT), .timeouts = ata_eh_other_timeouts, }, { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT), diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index a6b1a7556d37..80fe0f6fed29 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -819,19 +819,12 @@ static int arasan_cf_probe(struct platform_device *pdev) else quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */ - /* - * If there's an error getting IRQ (or we do get IRQ0), - * support only PIO - */ - ret = platform_get_irq(pdev, 0); - if (ret > 0) { - acdev->irq = ret; + /* if irq is 0, support only PIO */ + acdev->irq = platform_get_irq(pdev, 0); + if (acdev->irq) irq_handler = arasan_cf_interrupt; - } else if (ret == -EPROBE_DEFER) { - return ret; - } else { + else quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA; - } acdev->pbase = res->start; acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start, diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c index ebdd2dfabbeb..634c814cbeda 100644 --- a/drivers/ata/pata_ep93xx.c +++ b/drivers/ata/pata_ep93xx.c @@ -927,7 +927,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev) /* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */ irq = platform_get_irq(pdev, 0); if (irq < 0) { - err = irq; + err = -ENXIO; goto err_rel_gpio; } diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 44cc02afaa8b..3ba843f5cdc0 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -919,20 +919,6 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) irqmask &= ~0x10; pci_write_config_byte(dev, 0x5a, irqmask); - /* - * HPT371 chips physically have only one channel, the secondary one, - * but the primary channel registers do exist! Go figure... - * So, we manually disable the non-existing channel here - * (if the BIOS hasn't done this already). 
- */ - if (dev->device == PCI_DEVICE_ID_TTI_HPT371) { - u8 mcr1; - - pci_read_config_byte(dev, 0x50, &mcr1); - mcr1 &= ~0x04; - pci_write_config_byte(dev, 0x50, mcr1); - } - /* * default to pci clock. make sure MA15/16 are set to output * to prevent drives having problems with 40-pin cables. Needed @@ -964,14 +950,14 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) if ((freq >> 12) != 0xABCDE) { int i; - u16 sr; + u8 sr; u32 total = 0; pr_warn("BIOS has not set timing clocks\n"); /* This is the process the HPT371 BIOS is reported to use */ for (i = 0; i < 128; i++) { - pci_read_config_word(dev, 0x78, &sr); + pci_read_config_byte(dev, 0x78, &sr); total += sr & 0x1FF; udelay(15); } diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c index fb8d1f68f36f..abda44183512 100644 --- a/drivers/ata/pata_ixp4xx_cf.c +++ b/drivers/ata/pata_ixp4xx_cf.c @@ -169,12 +169,8 @@ static int ixp4xx_pata_probe(struct platform_device *pdev) return -ENOMEM; irq = platform_get_irq(pdev, 0); - if (irq > 0) + if (irq) irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); - else if (irq < 0) - return irq; - else - return -EINVAL; /* Setup expansion bus chip selects */ *data->cs0_cfg = data->cs0_bits; diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index 55fcdb798002..bce2a8ca4678 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c @@ -328,8 +328,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev, iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); if (unlikely(slop)) { - __le32 pad = 0; - + __le32 pad; if (rw == READ) { pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); memcpy(buf + buflen - slop, &pad, slop); @@ -717,8 +716,7 @@ static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf, ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); if (unlikely(slop)) { - __le32 pad = 0; - + __le32 pad; if (rw == WRITE) { memcpy(&pad, buf + buflen - slop, slop); 
iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr); diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 909de33f9158..27245957eee3 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -909,11 +909,10 @@ static int octeon_cf_probe(struct platform_device *pdev) return -EINVAL; } + irq_handler = octeon_cf_interrupt; i = platform_get_irq(dma_dev, 0); - if (i > 0) { + if (i > 0) irq = i; - irq_handler = octeon_cf_interrupt; - } } of_node_put(dma_node); } diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c index 76c550e160f6..c8b6a780a290 100644 --- a/drivers/ata/pata_rb532_cf.c +++ b/drivers/ata/pata_rb532_cf.c @@ -120,12 +120,10 @@ static int rb532_pata_driver_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { + if (irq <= 0) { dev_err(&pdev->dev, "no IRQ resource found\n"); - return irq; + return -ENOENT; } - if (!irq) - return -EINVAL; pdata = dev_get_platdata(&pdev->dev); if (!pdata) { diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 6d2e54209ae6..100b5a3621ef 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -1406,14 +1406,6 @@ static int sata_fsl_init_controller(struct ata_host *host) return 0; } -static void sata_fsl_host_stop(struct ata_host *host) -{ - struct sata_fsl_host_priv *host_priv = host->private_data; - - iounmap(host_priv->hcr_base); - kfree(host_priv); -} - /* * scsi mid-layer and libata interface structures */ @@ -1446,8 +1438,6 @@ static struct ata_port_operations sata_fsl_ops = { .port_start = sata_fsl_port_start, .port_stop = sata_fsl_port_stop, - .host_stop = sata_fsl_host_stop, - .pmp_attach = sata_fsl_pmp_attach, .pmp_detach = sata_fsl_pmp_detach, }; @@ -1502,9 +1492,9 @@ static int sata_fsl_probe(struct platform_device *ofdev) host_priv->ssr_base = ssr_base; host_priv->csr_base = csr_base; - irq = platform_get_irq(ofdev, 0); - if (irq < 0) { - retval = irq; + irq = 
irq_of_parse_and_map(ofdev->dev.of_node, 0); + if (!irq) { + dev_err(&ofdev->dev, "invalid irq from platform\n"); goto error_exit_with_cleanup; } host_priv->irq = irq; @@ -1581,6 +1571,10 @@ static int sata_fsl_remove(struct platform_device *ofdev) ata_host_detach(host); + irq_dispose_mapping(host_priv->irq); + iounmap(host_priv->hcr_base); + kfree(host_priv); + return 0; } diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index 77691154d2f1..8638d575b2b9 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -483,12 +483,10 @@ static int ahci_highbank_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { + if (irq <= 0) { dev_err(dev, "no irq\n"); - return irq; - } - if (!irq) return -EINVAL; + } hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); if (!hpriv) { diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 509f63891fb0..5718dc94c90c 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -3909,8 +3909,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) break; default: - dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx); - return -EINVAL; + dev_err(host->dev, "BUG: invalid board index %u\n", board_idx); + return 1; } hpriv->hp_flags = hp_flags; @@ -4101,10 +4101,6 @@ static int mv_platform_probe(struct platform_device *pdev) n_ports = mv_platform_data->n_ports; irq = platform_get_irq(pdev, 0); } - if (irq < 0) - return irq; - if (!irq) - return -EINVAL; host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 3d5ad2bc809b..340a1ee79d28 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2278,8 +2278,7 @@ out: return rc; err_eni_release: - dev->phy = NULL; - iounmap(ENI_DEV(dev)->ioaddr); + eni_do_release(dev); err_unregister: atm_dev_deregister(dev); err_free_consistent: diff --git 
a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c index 40644670cff2..feb023d7eebd 100644 --- a/drivers/atm/idt77105.c +++ b/drivers/atm/idt77105.c @@ -261,7 +261,7 @@ static int idt77105_start(struct atm_dev *dev) { unsigned long flags; - if (!(dev->phy_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL))) + if (!(dev->dev_data = kmalloc(sizeof(struct idt77105_priv),GFP_KERNEL))) return -ENOMEM; PRIV(dev)->dev = dev; spin_lock_irqsave(&idt77105_priv_lock, flags); @@ -338,7 +338,7 @@ static int idt77105_stop(struct atm_dev *dev) else idt77105_all = walk->next; dev->phy = NULL; - dev->phy_data = NULL; + dev->dev_data = NULL; kfree(walk); break; } diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 89adb49e435e..074616b39f4d 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -3615,7 +3615,7 @@ static int idt77252_init_one(struct pci_dev *pcidev, if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) { printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev)); - goto err_out_disable_pdev; + return err; } card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL); diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index dc1b7f11e6af..860a33a90ebf 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -3296,7 +3296,7 @@ static void __exit ia_module_exit(void) { pci_unregister_driver(&ia_driver); - del_timer_sync(&ia_timer); + del_timer(&ia_timer); } module_init(ia_module_init); diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index 3002b1177005..ce43ae3e87b3 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -2239,7 +2239,6 @@ static int lanai_dev_open(struct atm_dev *atmdev) conf1_write(lanai); #endif iounmap(lanai->base); - lanai->base = NULL; error_pci: pci_disable_device(lanai->pci); error: @@ -2252,8 +2251,6 @@ static int lanai_dev_open(struct atm_dev *atmdev) static void lanai_dev_close(struct atm_dev *atmdev) { struct lanai_dev *lanai = (struct lanai_dev *) 
atmdev->dev_data; - if (lanai->base==NULL) - return; printk(KERN_INFO DEV_LABEL "(itf %d): shutting down interface\n", lanai->number); lanai_timed_poll_stop(lanai); @@ -2563,7 +2560,7 @@ static int lanai_init_one(struct pci_dev *pci, struct atm_dev *atmdev; int result; - lanai = kzalloc(sizeof(*lanai), GFP_KERNEL); + lanai = kmalloc(sizeof(*lanai), GFP_KERNEL); if (lanai == NULL) { printk(KERN_ERR DEV_LABEL ": couldn't allocate dev_data structure!\n"); diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index 56d464b58768..49da83f87170 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -298,7 +298,7 @@ static void __exit nicstar_cleanup(void) { XPRINTK("nicstar: nicstar_cleanup() called.\n"); - del_timer_sync(&ns_timer); + del_timer(&ns_timer); pci_unregister_driver(&nicstar_driver); @@ -525,15 +525,6 @@ static int ns_init_card(int i, struct pci_dev *pcidev) /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ writel(0x00000000, card->membase + VPM); - card->intcnt = 0; - if (request_irq - (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) { - pr_err("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); - error = 9; - ns_init_card_error(card, error); - return error; - } - /* Initialize TSQ */ card->tsq.org = dma_alloc_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, @@ -760,6 +751,15 @@ static int ns_init_card(int i, struct pci_dev *pcidev) card->efbie = 1; + card->intcnt = 0; + if (request_irq + (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) { + printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); + error = 9; + ns_init_card_error(card, error); + return error; + } + /* Register device */ card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops, -1, NULL); @@ -837,12 +837,10 @@ static void ns_init_card_error(ns_dev *card, int error) dev_kfree_skb_any(hb); } if (error >= 12) { - dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, - 
card->rsq.org, card->rsq.dma); + kfree(card->rsq.org); } if (error >= 11) { - dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, - card->tsq.org, card->tsq.dma); + kfree(card->tsq.org); } if (error >= 10) { free_irq(card->pcidev->irq, card); diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c index b2f4e8df1591..5120a96b3a89 100644 --- a/drivers/atm/uPD98402.c +++ b/drivers/atm/uPD98402.c @@ -210,7 +210,7 @@ static void uPD98402_int(struct atm_dev *dev) static int uPD98402_start(struct atm_dev *dev) { DPRINTK("phy_start\n"); - if (!(dev->phy_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL))) + if (!(dev->dev_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL))) return -ENOMEM; spin_lock_init(&PRIV(dev)->lock); memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats)); diff --git a/drivers/base/core.c b/drivers/base/core.c index 014b892263c5..5c2a99e2cb84 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -715,7 +715,6 @@ void device_initialize(struct device *dev) device_pm_init(dev); set_dev_node(dev, -1); #ifdef CONFIG_GENERIC_MSI_IRQ - raw_spin_lock_init(&dev->msi_lock); INIT_LIST_HEAD(&dev->msi_list); #endif } @@ -2363,7 +2362,7 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) if (fwnode_is_primary(fn)) { dev->fwnode = fn->secondary; if (!(parent && fn == parent->fwnode)) - fn->secondary = NULL; + fn->secondary = ERR_PTR(-ENODEV); } else { dev->fwnode = NULL; } diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 45119d4cbb8b..128bbe8da649 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -208,10 +208,146 @@ static struct attribute_group cpu_isolated_attr_group = { #endif +#ifdef CONFIG_SCHED_HMP + +static ssize_t show_sched_static_cpu_pwr_cost(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + ssize_t rc; + int cpuid = cpu->dev.id; + unsigned int pwr_cost; + + pwr_cost = 
sched_get_static_cpu_pwr_cost(cpuid); + + rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost); + + return rc; +} + +static ssize_t __ref store_sched_static_cpu_pwr_cost(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + int err; + int cpuid = cpu->dev.id; + unsigned int pwr_cost; + + err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost); + if (err) + return err; + + err = sched_set_static_cpu_pwr_cost(cpuid, pwr_cost); + + if (err >= 0) + err = count; + + return err; +} + +static ssize_t show_sched_static_cluster_pwr_cost(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + ssize_t rc; + int cpuid = cpu->dev.id; + unsigned int pwr_cost; + + pwr_cost = sched_get_static_cluster_pwr_cost(cpuid); + + rc = snprintf(buf, PAGE_SIZE-2, "%d\n", pwr_cost); + + return rc; +} + +static ssize_t __ref store_sched_static_cluster_pwr_cost(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + int err; + int cpuid = cpu->dev.id; + unsigned int pwr_cost; + + err = kstrtouint(strstrip((char *)buf), 0, &pwr_cost); + if (err) + return err; + + err = sched_set_static_cluster_pwr_cost(cpuid, pwr_cost); + + if (err >= 0) + err = count; + + return err; +} + +static ssize_t show_sched_cluser_wake_idle(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + ssize_t rc; + int cpuid = cpu->dev.id; + unsigned int wake_up_idle; + + wake_up_idle = sched_get_cluster_wake_idle(cpuid); + + rc = scnprintf(buf, PAGE_SIZE-2, "%d\n", wake_up_idle); + + return rc; +} + +static ssize_t __ref store_sched_cluster_wake_idle(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + int err; + int 
cpuid = cpu->dev.id; + unsigned int wake_up_idle; + + err = kstrtouint(strstrip((char *)buf), 0, &wake_up_idle); + if (err) + return err; + + err = sched_set_cluster_wake_idle(cpuid, wake_up_idle); + + if (err >= 0) + err = count; + + return err; +} + +static DEVICE_ATTR(sched_static_cpu_pwr_cost, 0644, + show_sched_static_cpu_pwr_cost, + store_sched_static_cpu_pwr_cost); +static DEVICE_ATTR(sched_static_cluster_pwr_cost, 0644, + show_sched_static_cluster_pwr_cost, + store_sched_static_cluster_pwr_cost); +static DEVICE_ATTR(sched_cluster_wake_up_idle, 0644, + show_sched_cluser_wake_idle, + store_sched_cluster_wake_idle); + +static struct attribute *hmp_sched_cpu_attrs[] = { + &dev_attr_sched_static_cpu_pwr_cost.attr, + &dev_attr_sched_static_cluster_pwr_cost.attr, + &dev_attr_sched_cluster_wake_up_idle.attr, + NULL +}; + +static struct attribute_group sched_hmp_cpu_attr_group = { + .attrs = hmp_sched_cpu_attrs, +}; + +#endif /* CONFIG_SCHED_HMP */ + static const struct attribute_group *common_cpu_attr_groups[] = { #ifdef CONFIG_KEXEC &crash_note_cpu_attr_group, #endif +#ifdef CONFIG_SCHED_HMP + &sched_hmp_cpu_attr_group, +#endif #ifdef CONFIG_HOTPLUG_CPU &cpu_isolated_attr_group, #endif @@ -222,6 +358,9 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = { #ifdef CONFIG_KEXEC &crash_note_cpu_attr_group, #endif +#ifdef CONFIG_SCHED_HMP + &sched_hmp_cpu_attr_group, +#endif #ifdef CONFIG_HOTPLUG_CPU &cpu_isolated_attr_group, #endif diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index 8c05e7a5e777..feba1b211898 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -319,13 +319,8 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq) if (!wirq) return; - if (device_may_wakeup(wirq->dev)) { - if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && - !pm_runtime_status_suspended(wirq->dev)) - enable_irq(wirq->irq); - + if (device_may_wakeup(wirq->dev)) enable_irq_wake(wirq->irq); - } } /** @@ -340,11 +335,6 @@ 
void dev_pm_disarm_wake_irq(struct wake_irq *wirq) if (!wirq) return; - if (device_may_wakeup(wirq->dev)) { + if (device_may_wakeup(wirq->dev)) disable_irq_wake(wirq->irq); - - if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && - !pm_runtime_status_suspended(wirq->dev)) - disable_irq_nosync(wirq->irq); - } } diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index f868dda30da0..56486d92c4e7 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -296,14 +296,14 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, if (!blk) return -ENOMEM; - rbnode->block = blk; - if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) { present = krealloc(rbnode->cache_present, BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL); - if (!present) + if (!present) { + kfree(blk); return -ENOMEM; + } memset(present + BITS_TO_LONGS(rbnode->blklen), 0, (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen)) @@ -320,6 +320,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, } /* update the rbnode block, its size and the base register */ + rbnode->block = blk; rbnode->blklen = blklen; rbnode->base_reg = base_reg; rbnode->cache_present = present; diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 30827ab3bb07..327f9e374b44 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -246,7 +246,6 @@ EXPORT_SYMBOL(bcma_core_irq); void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core) { - device_initialize(&core->dev); core->dev.release = bcma_release_core_dev; core->dev.bus = &bcma_bus_type; dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index); @@ -310,10 +309,11 @@ static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core) { int err; - err = device_add(&core->dev); + err = device_register(&core->dev); if (err) { bcma_err(bus, "Could not register dev for core 0x%03X\n", core->id.id); + put_device(&core->dev); return; } 
core->dev_registered = true; @@ -404,7 +404,7 @@ void bcma_unregister_cores(struct bcma_bus *bus) /* Now noone uses internally-handled cores, we can free them */ list_for_each_entry_safe(core, tmp, &bus->cores, list) { list_del(&core->list); - put_device(&core->dev); + kfree(core); } } diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 324abc8d53fa..29819e719afa 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -267,7 +267,7 @@ config BLK_DEV_LOOP_MIN_COUNT dynamically allocated with the /dev/loop-control interface. config BLK_DEV_CRYPTOLOOP - tristate "Cryptoloop Support (DEPRECATED)" + tristate "Cryptoloop Support" select CRYPTO select CRYPTO_CBC depends on BLK_DEV_LOOP @@ -279,7 +279,7 @@ config BLK_DEV_CRYPTOLOOP WARNING: This device is not safe for journaled file systems like ext3 or Reiserfs. Please use the Device Mapper crypto module instead, which can be configured to be on-disk compatible with the - cryptoloop device. cryptoloop support will be removed in Linux 5.16. + cryptoloop device. source "drivers/block/drbd/Kconfig" @@ -540,7 +540,6 @@ config BLK_DEV_RBD config BLK_DEV_RSXX tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver" depends on PCI - select CRC32 help Device driver for IBM's high speed PCIe SSD storage device: Flash Adapter 900GB Full Height. 
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 2a1a4ac8933c..58c1138ad5e1 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -22,6 +22,7 @@ #include +#define SECTOR_SHIFT 9 #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c index d3d1f24ca7a3..99e773cb70d0 100644 --- a/drivers/block/cryptoloop.c +++ b/drivers/block/cryptoloop.c @@ -201,8 +201,6 @@ init_cryptoloop(void) if (rc) printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n"); - else - pr_warn("the cryptoloop driver has been deprecated and will be removed in in Linux 5.16\n"); return rc; } diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 6ffa49fafb5c..8bdc34dbaedf 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -1069,7 +1069,7 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned .done = 0, .flags = flags, .error = 0, - .kref = KREF_INIT(2), + .kref = { ATOMIC_INIT(2) }, }; if (!get_ldev_if_state(device, D_ATTACHING)) { /* put is in drbd_bm_aio_ctx_destroy() */ diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 373dab29addf..58ce577ba6d7 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -870,7 +870,7 @@ static void set_fdc(int drive) } /* locks the driver */ -static int lock_fdc(int drive) +static int lock_fdc(int drive, bool interruptible) { if (WARN(atomic_read(&usage_count) == 0, "Trying to lock fdc while usage count=0\n")) @@ -994,7 +994,7 @@ static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn); static void cancel_activity(void) { do_floppy = NULL; - cancel_delayed_work(&fd_timer); + cancel_delayed_work_sync(&fd_timer); cancel_work_sync(&floppy_work); } @@ -2180,7 +2180,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req) { int ret; - if (lock_fdc(drive)) + if (lock_fdc(drive, true)) return 
-EINTR; set_floppy(drive); @@ -2967,7 +2967,7 @@ static int user_reset_fdc(int drive, int arg, bool interruptible) { int ret; - if (lock_fdc(drive)) + if (lock_fdc(drive, interruptible)) return -EINTR; if (arg == FD_RESET_ALWAYS) @@ -3116,8 +3116,6 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr) } } -#define MAX_LEN (1UL << MAX_ORDER << PAGE_SHIFT) - static int raw_cmd_copyin(int cmd, void __user *param, struct floppy_raw_cmd **rcmd) { @@ -3155,7 +3153,7 @@ loop: ptr->resultcode = 0; if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) { - if (ptr->length <= 0 || ptr->length >= MAX_LEN) + if (ptr->length <= 0) return -EINVAL; ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length); fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length); @@ -3256,7 +3254,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, if (!capable(CAP_SYS_ADMIN)) return -EPERM; mutex_lock(&open_lock); - if (lock_fdc(drive)) { + if (lock_fdc(drive, true)) { mutex_unlock(&open_lock); return -EINTR; } @@ -3276,7 +3274,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, } else { int oldStretch; - if (lock_fdc(drive)) + if (lock_fdc(drive, true)) return -EINTR; if (cmd != FDDEFPRM) { /* notice a disk change immediately, else @@ -3362,7 +3360,7 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g) if (type) *g = &floppy_type[type]; else { - if (lock_fdc(drive)) + if (lock_fdc(drive, false)) return -EINTR; if (poll_drive(false, 0) == -EINTR) return -EINTR; @@ -3464,7 +3462,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int if (UDRS->fd_ref != 1) /* somebody else has this drive open */ return -EBUSY; - if (lock_fdc(drive)) + if (lock_fdc(drive, true)) return -EINTR; /* do the actual eject. 
Fails on @@ -3476,7 +3474,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int process_fd_request(); return ret; case FDCLRPRM: - if (lock_fdc(drive)) + if (lock_fdc(drive, true)) return -EINTR; current_type[drive] = NULL; floppy_sizes[drive] = MAX_DISK_SIZE << 1; @@ -3501,7 +3499,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int UDP->flags &= ~FTD_MSG; return 0; case FDFMTBEG: - if (lock_fdc(drive)) + if (lock_fdc(drive, true)) return -EINTR; if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) return -EINTR; @@ -3518,7 +3516,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int return do_format(drive, &inparam.f); case FDFMTEND: case FDFLUSH: - if (lock_fdc(drive)) + if (lock_fdc(drive, true)) return -EINTR; return invalidate_drive(bdev); case FDSETEMSGTRESH: @@ -3544,7 +3542,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int outparam = UDP; break; case FDPOLLDRVSTAT: - if (lock_fdc(drive)) + if (lock_fdc(drive, true)) return -EINTR; if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) return -EINTR; @@ -3567,7 +3565,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int case FDRAWCMD: if (type) return -EINVAL; - if (lock_fdc(drive)) + if (lock_fdc(drive, true)) return -EINTR; set_floppy(drive); i = raw_cmd_ioctl(cmd, (void __user *)param); @@ -3576,7 +3574,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int process_fd_request(); return i; case FDTWADDLE: - if (lock_fdc(drive)) + if (lock_fdc(drive, true)) return -EINTR; twaddle(); process_fd_request(); @@ -3803,7 +3801,7 @@ static int compat_getdrvstat(int drive, bool poll, mutex_lock(&floppy_mutex); if (poll) { - if (lock_fdc(drive)) + if (lock_fdc(drive, true)) goto Eintr; if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) goto Eintr; @@ -4110,8 +4108,7 @@ static unsigned int floppy_check_events(struct gendisk *disk, 
return DISK_EVENT_MEDIA_CHANGE; if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { - if (lock_fdc(drive)) - return -EINTR; + lock_fdc(drive, false); poll_drive(false, 0); process_fd_request(); } @@ -4210,9 +4207,7 @@ static int floppy_revalidate(struct gendisk *disk) "VFS: revalidate called on non-open device.\n")) return -EFAULT; - res = lock_fdc(drive); - if (res) - return res; + lock_fdc(drive, false); cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || test_bit(FD_VERIFY_BIT, &UDRS->flags)); if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) { diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 503e245912eb..c66033b6b67f 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -49,6 +49,15 @@ #define RBD_DEBUG /* Activate rbd_assert() calls */ +/* + * The basic unit of block I/O is a sector. It is interpreted in a + * number of contexts in Linux (blk, bio, genhd), but the default is + * universally 512 bytes. These symbols are just slightly more + * meaningful than the bare numbers they represent. + */ +#define SECTOR_SHIFT 9 +#define SECTOR_SIZE (1ULL << SECTOR_SHIFT) + /* * Increment the given counter and return its updated value. * If the counter is already 0 it will not be incremented. 
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index a53271acc2a2..620a3a67cdd5 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -180,17 +180,15 @@ static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf, { struct rsxx_cardinfo *card = file_inode(fp)->i_private; char *buf; - int st; + ssize_t st; buf = kzalloc(cnt, GFP_KERNEL); if (!buf) return -ENOMEM; st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1); - if (!st) { - if (copy_to_user(ubuf, buf, cnt)) - st = -EFAULT; - } + if (!st) + st = copy_to_user(ubuf, buf, cnt); kfree(buf); if (st) return st; @@ -895,7 +893,6 @@ static int rsxx_pci_probe(struct pci_dev *dev, card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event"); if (!card->event_wq) { dev_err(CARD_TO_DEV(card), "Failed card event setup.\n"); - st = -ENOMEM; goto failed_event_handler; } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 2bcc2bc64149..bdc3efacd0d2 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -808,8 +808,6 @@ static int virtblk_freeze(struct virtio_device *vdev) blk_mq_stop_hw_queues(vblk->disk->queue); vdev->config->del_vqs(vdev); - kfree(vblk->vqs); - return 0; } diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 698a52a96d2d..8dbdd156e0d3 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -825,11 +825,8 @@ again: pages[i]->page = persistent_gnt->page; pages[i]->persistent_gnt = persistent_gnt; } else { - if (get_free_page(blkif, &pages[i]->page)) { - put_free_pages(blkif, pages_to_gnt, segs_to_map); - ret = -ENOMEM; - goto out; - } + if (get_free_page(blkif, &pages[i]->page)) + goto out_of_memory; addr = vaddr(pages[i]->page); pages_to_gnt[segs_to_map] = pages[i]->page; pages[i]->persistent_gnt = NULL; @@ -845,8 +842,10 @@ again: break; } - if (segs_to_map) + if (segs_to_map) { ret = gnttab_map_refs(map, NULL, 
pages_to_gnt, segs_to_map); + BUG_ON(ret); + } /* * Now swizzle the MFN in our domain with the MFN from the other domain @@ -861,7 +860,7 @@ again: pr_debug("invalid buffer -- could not remap it\n"); put_free_pages(blkif, &pages[seg_idx]->page, 1); pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; - ret |= !ret; + ret |= 1; goto next; } pages[seg_idx]->handle = map[new_map_idx].handle; @@ -913,18 +912,15 @@ next: } segs_to_map = 0; last_map = map_until; - if (!ret && map_until != num) + if (map_until != num) goto again; -out: - for (i = last_map; i < num; i++) { - /* Don't zap current batch's valid persistent grants. */ - if(i >= map_until) - pages[i]->persistent_gnt = NULL; - pages[i]->handle = BLKBACK_INVALID_HANDLE; - } - return ret; + +out_of_memory: + pr_alert("%s: out of memory\n", __func__); + put_free_pages(blkif, pages_to_gnt, segs_to_map); + return -ENOMEM; } static int xen_blkbk_map_seg(struct pending_req *pending_req) diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index f974ed7c33b5..0ec257e69e95 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -219,7 +219,6 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) if (blkif->xenblkd) { kthread_stop(blkif->xenblkd); - blkif->xenblkd = NULL; wake_up(&blkif->shutdown_wq); } @@ -554,8 +553,7 @@ static int xen_blkbk_probe(struct xenbus_device *dev, /* setup back pointer */ be->blkif->be = be; - err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL, - backend_changed, + err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed, "%s/%s", dev->nodename, "physical-device"); if (err) goto fail; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index ae2c47e99c88..e1f71debdbba 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -64,7 +64,6 @@ enum blkif_state { BLKIF_STATE_DISCONNECTED, BLKIF_STATE_CONNECTED, BLKIF_STATE_SUSPENDED, - BLKIF_STATE_ERROR, }; struct grant { 
@@ -80,7 +79,6 @@ struct blk_shadow { struct grant **indirect_grants; struct scatterlist *sg; unsigned int num_sg; - bool inflight; }; struct split_bio { @@ -458,31 +456,16 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode, return 0; } -static unsigned long blkif_ring_get_request(struct blkfront_info *info, - struct request *req, - struct blkif_request **ring_req) -{ - unsigned long id; - - *ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); - info->ring.req_prod_pvt++; - - id = get_id_from_freelist(info); - info->shadow[id].request = req; - info->shadow[id].req.u.rw.id = id; - - return id; -} - static int blkif_queue_discard_req(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; - struct blkif_request *ring_req, *final_ring_req; + struct blkif_request *ring_req; unsigned long id; /* Fill out a communications ring structure. */ - id = blkif_ring_get_request(info, req, &final_ring_req); - ring_req = &info->shadow[id].req; + ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); + id = get_id_from_freelist(info); + info->shadow[id].request = req; ring_req->operation = BLKIF_OP_DISCARD; ring_req->u.discard.nr_sectors = blk_rq_sectors(req); @@ -493,9 +476,10 @@ static int blkif_queue_discard_req(struct request *req) else ring_req->u.discard.flag = 0; - /* Copy the request to the ring page. */ - *final_ring_req = *ring_req; - info->shadow[id].inflight = true; + info->ring.req_prod_pvt++; + + /* Keep a private copy so we can reissue requests when recovering. 
*/ + info->shadow[id].req = *ring_req; return 0; } @@ -585,7 +569,7 @@ static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset, static int blkif_queue_rw_req(struct request *req) { struct blkfront_info *info = req->rq_disk->private_data; - struct blkif_request *ring_req, *final_ring_req; + struct blkif_request *ring_req; unsigned long id; int i; struct setup_rw_req setup = { @@ -629,8 +613,9 @@ static int blkif_queue_rw_req(struct request *req) new_persistent_gnts = 0; /* Fill out a communications ring structure. */ - id = blkif_ring_get_request(info, req, &final_ring_req); - ring_req = &info->shadow[id].req; + ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); + id = get_id_from_freelist(info); + info->shadow[id].request = req; BUG_ON(info->max_indirect_segments == 0 && GREFS(req->nr_phys_segments) > BLKIF_MAX_SEGMENTS_PER_REQUEST); @@ -709,9 +694,10 @@ static int blkif_queue_rw_req(struct request *req) if (setup.segments) kunmap_atomic(setup.segments); - /* Copy request(s) to the ring page. */ - *final_ring_req = *ring_req; - info->shadow[id].inflight = true; + info->ring.req_prod_pvt++; + + /* Keep a private copy so we can reissue requests when recovering. 
*/ + info->shadow[id].req = *ring_req; if (new_persistent_gnts) gnttab_free_grant_references(setup.gref_head); @@ -1310,84 +1296,58 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, static irqreturn_t blkif_interrupt(int irq, void *dev_id) { struct request *req; - struct blkif_response bret; + struct blkif_response *bret; RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; int error; - unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; spin_lock_irqsave(&info->io_lock, flags); if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { spin_unlock_irqrestore(&info->io_lock, flags); - xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); return IRQ_HANDLED; } again: - rp = READ_ONCE(info->ring.sring->rsp_prod); + rp = info->ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. */ - if (RING_RESPONSE_PROD_OVERFLOW(&info->ring, rp)) { - pr_alert("%s: illegal number of responses %u\n", - info->gd->disk_name, rp - info->ring.rsp_cons); - goto err; - } for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; - unsigned int op; - - eoiflag = 0; - - RING_COPY_RESPONSE(&info->ring, i, &bret); - id = bret.id; + bret = RING_GET_RESPONSE(&info->ring, i); + id = bret->id; /* * The backend has messed up and given us an id that we would * never have given to it (we stamp it up to BLK_RING_SIZE - * look in get_id_from_freelist. */ if (id >= BLK_RING_SIZE(info)) { - pr_alert("%s: response has incorrect id (%ld)\n", - info->gd->disk_name, id); - goto err; - } - if (!info->shadow[id].inflight) { - pr_alert("%s: response references no pending request\n", - info->gd->disk_name); - goto err; + WARN(1, "%s: response to %s has incorrect id (%ld)\n", + info->gd->disk_name, op_name(bret->operation), id); + /* We can't safely get the 'struct request' as + * the id is busted. 
*/ + continue; } - - info->shadow[id].inflight = false; req = info->shadow[id].request; - op = info->shadow[id].req.operation; - if (op == BLKIF_OP_INDIRECT) - op = info->shadow[id].req.u.indirect.indirect_op; - if (bret.operation != op) { - pr_alert("%s: response has wrong operation (%u instead of %u)\n", - info->gd->disk_name, bret.operation, op); - goto err; - } - - if (bret.operation != BLKIF_OP_DISCARD) - blkif_completion(&info->shadow[id], info, &bret); + if (bret->operation != BLKIF_OP_DISCARD) + blkif_completion(&info->shadow[id], info, bret); if (add_id_to_freelist(info, id)) { WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n", - info->gd->disk_name, op_name(bret.operation), id); + info->gd->disk_name, op_name(bret->operation), id); continue; } - error = (bret.status == BLKIF_RSP_OKAY) ? 0 : -EIO; - switch (bret.operation) { + error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; + switch (bret->operation) { case BLKIF_OP_DISCARD: - if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) { + if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { struct request_queue *rq = info->rq; - - pr_warn_ratelimited("blkfront: %s: %s op failed\n", - info->gd->disk_name, op_name(bret.operation)); + printk(KERN_WARNING "blkfront: %s: %s op failed\n", + info->gd->disk_name, op_name(bret->operation)); error = -EOPNOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; @@ -1398,15 +1358,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) break; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: - if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) { - pr_warn_ratelimited("blkfront: %s: %s op failed\n", - info->gd->disk_name, op_name(bret.operation)); + if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { + printk(KERN_WARNING "blkfront: %s: %s op failed\n", + info->gd->disk_name, op_name(bret->operation)); error = -EOPNOTSUPP; } - if (unlikely(bret.status == BLKIF_RSP_ERROR && + if (unlikely(bret->status == BLKIF_RSP_ERROR && 
info->shadow[id].req.u.rw.nr_segments == 0)) { - pr_warn_ratelimited("blkfront: %s: empty %s op failed\n", - info->gd->disk_name, op_name(bret.operation)); + printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", + info->gd->disk_name, op_name(bret->operation)); error = -EOPNOTSUPP; } if (unlikely(error)) { @@ -1418,10 +1378,9 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) /* fall through */ case BLKIF_OP_READ: case BLKIF_OP_WRITE: - if (unlikely(bret.status != BLKIF_RSP_OKAY)) - dev_dbg_ratelimited(&info->xbdev->dev, - "Bad return from blkdev data request: %x\n", - bret.status); + if (unlikely(bret->status != BLKIF_RSP_OKAY)) + dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " + "request: %x\n", bret->status); blk_mq_complete_request(req, error); break; @@ -1444,18 +1403,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) spin_unlock_irqrestore(&info->io_lock, flags); - xen_irq_lateeoi(irq, eoiflag); - - return IRQ_HANDLED; - - err: - info->connected = BLKIF_STATE_ERROR; - - spin_unlock_irqrestore(&info->io_lock, flags); - - /* No EOI in order to avoid further interrupts. 
*/ - - pr_alert("%s disabled for further use\n", info->gd->disk_name); return IRQ_HANDLED; } @@ -1493,8 +1440,8 @@ static int setup_blkring(struct xenbus_device *dev, if (err) goto fail; - err = bind_evtchn_to_irqhandler_lateeoi(info->evtchn, blkif_interrupt, - 0, "blkif", info); + err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0, + "blkif", info); if (err <= 0) { xenbus_dev_fatal(dev, err, "bind_evtchn_to_irqhandler failed"); @@ -1966,7 +1913,6 @@ out_of_memory: info->shadow[i].sg = NULL; kfree(info->shadow[i].indirect_grants); info->shadow[i].indirect_grants = NULL; - info->shadow[i].inflight = false; } if (!list_empty(&info->indirect_pages)) { struct page *indirect_page, *n; diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig index cb53957d58f9..4831d0a4f352 100644 --- a/drivers/block/zram/Kconfig +++ b/drivers/block/zram/Kconfig @@ -1,7 +1,9 @@ config ZRAM tristate "Compressed RAM block device support" - depends on BLOCK && SYSFS && ZSMALLOC && CRYPTO - select CRYPTO_LZO + depends on BLOCK && SYSFS + select ZPOOL + select LZO_COMPRESS + select LZO_DECOMPRESS default n help Creates virtual block devices called /dev/zramX (X = 0, 1, ...). @@ -12,26 +14,14 @@ config ZRAM It has several use cases, for example: /tmp storage, use as swap disks and maybe many more. - See Documentation/blockdev/zram.txt for more information. + See zram.txt for more information. -config ZRAM_WRITEBACK - bool "Write back incompressible page to backing device" - depends on ZRAM - default n - help - With incompressible page, there is no memory saving to keep it - in memory. Instead, write it out to backing device. - For this feature, admin should set up backing device via - /sys/block/zramX/backing_dev. - - See Documentation/blockdev/zram.txt for more information. 
- -config ZRAM_MEMORY_TRACKING - bool "Track zRam block status" - depends on ZRAM && DEBUG_FS +config ZRAM_LZ4_COMPRESS + bool "Enable LZ4 algorithm support" + depends on ZRAM + select LZ4_COMPRESS + select LZ4_DECOMPRESS + default n help - With this feature, admin can track the state of allocated blocks - of zRAM. Admin could see the information via - /sys/kernel/debug/zram/zramX/block_state. - - See Documentation/blockdev/zram.txt for more information. + This option enables LZ4 compression algorithm support. Compression + algorithm can be changed using `comp_algorithm' device attribute. \ No newline at end of file diff --git a/drivers/block/zram/Makefile b/drivers/block/zram/Makefile index 9e2b79e9a990..be0763ff57a2 100644 --- a/drivers/block/zram/Makefile +++ b/drivers/block/zram/Makefile @@ -1,3 +1,5 @@ -zram-y := zcomp.o zram_drv.o +zram-y := zcomp_lzo.o zcomp.o zram_drv.o + +zram-$(CONFIG_ZRAM_LZ4_COMPRESS) += zcomp_lz4.o obj-$(CONFIG_ZRAM) += zram.o diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index c084a7f9763d..b51a816d766b 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -14,153 +14,108 @@ #include #include #include -#include #include "zcomp.h" - -static const char * const backends[] = { - "lzo", -#if IS_ENABLED(CONFIG_CRYPTO_LZ4) - "lz4", -#endif -#if IS_ENABLED(CONFIG_CRYPTO_DEFLATE) - "deflate", -#endif -#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC) - "lz4hc", +#include "zcomp_lzo.h" +#ifdef CONFIG_ZRAM_LZ4_COMPRESS +#include "zcomp_lz4.h" #endif -#if IS_ENABLED(CONFIG_CRYPTO_842) - "842", -#endif -#if IS_ENABLED(CONFIG_CRYPTO_ZSTD) - "zstd", + +static struct zcomp_backend *backends[] = { + &zcomp_lzo, +#ifdef CONFIG_ZRAM_LZ4_COMPRESS + &zcomp_lz4, #endif NULL }; -static void zcomp_strm_free(struct zcomp_strm *zstrm) +static struct zcomp_backend *find_backend(const char *compress) +{ + int i = 0; + while (backends[i]) { + if (sysfs_streq(compress, backends[i]->name)) + break; + i++; + } + return backends[i]; +} 
+ +static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm) { - if (!IS_ERR_OR_NULL(zstrm->tfm)) - crypto_free_comp(zstrm->tfm); + if (zstrm->private) + comp->backend->destroy(zstrm->private); free_pages((unsigned long)zstrm->buffer, 1); kfree(zstrm); } /* - * allocate new zcomp_strm structure with ->tfm initialized by + * allocate new zcomp_strm structure with ->private initialized by * backend, return NULL on error */ -static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp) +static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp, gfp_t flags) { - struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL); + struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), flags); if (!zstrm) return NULL; - zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0); + zstrm->private = comp->backend->create(flags); /* * allocate 2 pages. 1 for compressed data, plus 1 extra for the * case when compressed size is larger than the original one */ - zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); - if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) { - zcomp_strm_free(zstrm); + zstrm->buffer = (void *)__get_free_pages(flags | __GFP_ZERO, 1); + if (!zstrm->private || !zstrm->buffer) { + zcomp_strm_free(comp, zstrm); zstrm = NULL; } return zstrm; } -bool zcomp_available_algorithm(const char *comp) -{ - int i = 0; - - while (backends[i]) { - if (sysfs_streq(comp, backends[i])) - return true; - i++; - } - - /* - * Crypto does not ignore a trailing new line symbol, - * so make sure you don't supply a string containing - * one. - * This also means that we permit zcomp initialisation - * with any compressing algorithm known to crypto api. 
- */ - return crypto_has_comp(comp, 0, 0) == 1; -} - /* show available compressors */ ssize_t zcomp_available_show(const char *comp, char *buf) { - bool known_algorithm = false; ssize_t sz = 0; int i = 0; - for (; backends[i]; i++) { - if (!strcmp(comp, backends[i])) { - known_algorithm = true; + while (backends[i]) { + if (!strcmp(comp, backends[i]->name)) sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, - "[%s] ", backends[i]); - } else { + "[%s] ", backends[i]->name); + else sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, - "%s ", backends[i]); - } + "%s ", backends[i]->name); + i++; } - - /* - * Out-of-tree module known to crypto api or a missing - * entry in `backends'. - */ - if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1) - sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, - "[%s] ", comp); - sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n"); return sz; } -struct zcomp_strm *zcomp_stream_get(struct zcomp *comp) +bool zcomp_available_algorithm(const char *comp) +{ + return find_backend(comp) != NULL; +} + +struct zcomp_strm *zcomp_strm_find(struct zcomp *comp) { return *get_cpu_ptr(comp->stream); } -void zcomp_stream_put(struct zcomp *comp) +void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm) { put_cpu_ptr(comp->stream); } -int zcomp_compress(struct zcomp_strm *zstrm, - const void *src, unsigned int *dst_len) +int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, + const unsigned char *src, size_t *dst_len) { - /* - * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized - * because sometimes we can endup having a bigger compressed data - * due to various reasons: for example compression algorithms tend - * to add some padding to the compressed buffer. Speaking of padding, - * comp algorithm `842' pads the compressed length to multiple of 8 - * and returns -ENOSP when the dst memory is not big enough, which - * is not something that ZRAM wants to see. 
We can handle the - * `compressed_size > PAGE_SIZE' case easily in ZRAM, but when we - * receive -ERRNO from the compressing backend we can't help it - * anymore. To make `842' happy we need to tell the exact size of - * the dst buffer, zram_drv will take care of the fact that - * compressed buffer is too big. - */ - *dst_len = PAGE_SIZE * 2; - - return crypto_comp_compress(zstrm->tfm, - src, PAGE_SIZE, - zstrm->buffer, dst_len); + return comp->backend->compress(src, zstrm->buffer, dst_len, + zstrm->private); } -int zcomp_decompress(struct zcomp_strm *zstrm, - const void *src, unsigned int src_len, void *dst) +int zcomp_decompress(struct zcomp *comp, const unsigned char *src, + size_t src_len, unsigned char *dst) { - unsigned int dst_len = PAGE_SIZE; - - return crypto_comp_decompress(zstrm->tfm, - src, src_len, - dst, &dst_len); + return comp->backend->decompress(src, src_len, dst); } static int __zcomp_cpu_notifier(struct zcomp *comp, @@ -172,7 +127,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp, case CPU_UP_PREPARE: if (WARN_ON(*per_cpu_ptr(comp->stream, cpu))) break; - zstrm = zcomp_strm_alloc(comp); + zstrm = zcomp_strm_alloc(comp, GFP_KERNEL); if (IS_ERR_OR_NULL(zstrm)) { pr_err("Can't allocate a compression stream\n"); return NOTIFY_BAD; @@ -183,7 +138,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp, case CPU_UP_CANCELED: zstrm = *per_cpu_ptr(comp->stream, cpu); if (!IS_ERR_OR_NULL(zstrm)) - zcomp_strm_free(zstrm); + zcomp_strm_free(comp, zstrm); *per_cpu_ptr(comp->stream, cpu) = NULL; break; default: @@ -254,16 +209,18 @@ void zcomp_destroy(struct zcomp *comp) struct zcomp *zcomp_create(const char *compress) { struct zcomp *comp; + struct zcomp_backend *backend; int error; - if (!zcomp_available_algorithm(compress)) + backend = find_backend(compress); + if (!backend) return ERR_PTR(-EINVAL); comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL); if (!comp) return ERR_PTR(-ENOMEM); - comp->name = compress; + comp->backend = backend; error = 
zcomp_init(comp); if (error) { kfree(comp); diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h index 478cac2ed465..ffd88cb747fe 100644 --- a/drivers/block/zram/zcomp.h +++ b/drivers/block/zram/zcomp.h @@ -13,15 +13,33 @@ struct zcomp_strm { /* compression/decompression buffer */ void *buffer; - struct crypto_comp *tfm; + /* + * The private data of the compression stream, only compression + * stream backend can touch this (e.g. compression algorithm + * working memory) + */ + void *private; +}; + +/* static compression backend */ +struct zcomp_backend { + int (*compress)(const unsigned char *src, unsigned char *dst, + size_t *dst_len, void *private); + + int (*decompress)(const unsigned char *src, size_t src_len, + unsigned char *dst); + + void *(*create)(gfp_t flags); + void (*destroy)(void *private); + + const char *name; }; /* dynamic per-device compression frontend */ struct zcomp { struct zcomp_strm * __percpu *stream; + struct zcomp_backend *backend; struct notifier_block notifier; - - const char *name; }; ssize_t zcomp_available_show(const char *comp, char *buf); @@ -30,14 +48,14 @@ bool zcomp_available_algorithm(const char *comp); struct zcomp *zcomp_create(const char *comp); void zcomp_destroy(struct zcomp *comp); -struct zcomp_strm *zcomp_stream_get(struct zcomp *comp); -void zcomp_stream_put(struct zcomp *comp); +struct zcomp_strm *zcomp_strm_find(struct zcomp *comp); +void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm); -int zcomp_compress(struct zcomp_strm *zstrm, - const void *src, unsigned int *dst_len); +int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, + const unsigned char *src, size_t *dst_len); -int zcomp_decompress(struct zcomp_strm *zstrm, - const void *src, unsigned int src_len, void *dst); +int zcomp_decompress(struct zcomp *comp, const unsigned char *src, + size_t src_len, unsigned char *dst); bool zcomp_set_max_streams(struct zcomp *comp, int num_strm); #endif /* _ZCOMP_H_ */ diff --git 
a/drivers/block/zram/zcomp_lz4.c b/drivers/block/zram/zcomp_lz4.c new file mode 100644 index 000000000000..dc2338d5258c --- /dev/null +++ b/drivers/block/zram/zcomp_lz4.c @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2014 Sergey Senozhatsky. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include + +#include "zcomp_lz4.h" + +static void *zcomp_lz4_create(gfp_t flags) +{ + void *ret; + + ret = kzalloc(LZ4_MEM_COMPRESS, flags); + if (!ret) + ret = __vmalloc(LZ4_MEM_COMPRESS, + flags | __GFP_ZERO | __GFP_HIGHMEM, + PAGE_KERNEL); + return ret; +} + +static void zcomp_lz4_destroy(void *private) +{ + kvfree(private); +} + +static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst, + size_t *dst_len, void *private) +{ + /* return : Success if return 0 */ + return lz4_compress(src, PAGE_SIZE, dst, dst_len, private); +} + +static int zcomp_lz4_decompress(const unsigned char *src, size_t src_len, + unsigned char *dst) +{ + size_t dst_len = PAGE_SIZE; + /* return : Success if return 0 */ + return lz4_decompress_unknownoutputsize(src, src_len, dst, &dst_len); +} + +struct zcomp_backend zcomp_lz4 = { + .compress = zcomp_lz4_compress, + .decompress = zcomp_lz4_decompress, + .create = zcomp_lz4_create, + .destroy = zcomp_lz4_destroy, + .name = "lz4", +}; diff --git a/drivers/block/zram/zcomp_lz4.h b/drivers/block/zram/zcomp_lz4.h new file mode 100644 index 000000000000..60613fb29dd8 --- /dev/null +++ b/drivers/block/zram/zcomp_lz4.h @@ -0,0 +1,17 @@ +/* + * Copyright (C) 2014 Sergey Senozhatsky. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _ZCOMP_LZ4_H_ +#define _ZCOMP_LZ4_H_ + +#include "zcomp.h" + +extern struct zcomp_backend zcomp_lz4; + +#endif /* _ZCOMP_LZ4_H_ */ diff --git a/drivers/block/zram/zcomp_lzo.c b/drivers/block/zram/zcomp_lzo.c new file mode 100644 index 000000000000..0ab6fce8abe4 --- /dev/null +++ b/drivers/block/zram/zcomp_lzo.c @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2014 Sergey Senozhatsky. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include + +#include "zcomp_lzo.h" + +static void *lzo_create(gfp_t flags) +{ + void *ret; + + ret = kzalloc(LZO1X_MEM_COMPRESS, flags); + if (!ret) + ret = __vmalloc(LZO1X_MEM_COMPRESS, + flags | __GFP_ZERO | __GFP_HIGHMEM, + PAGE_KERNEL); + return ret; +} + +static void lzo_destroy(void *private) +{ + kvfree(private); +} + +static int lzo_compress(const unsigned char *src, unsigned char *dst, + size_t *dst_len, void *private) +{ + int ret = lzo1x_1_compress(src, PAGE_SIZE, dst, dst_len, private); + return ret == LZO_E_OK ? 0 : ret; +} + +static int lzo_decompress(const unsigned char *src, size_t src_len, + unsigned char *dst) +{ + size_t dst_len = PAGE_SIZE; + int ret = lzo1x_decompress_safe(src, src_len, dst, &dst_len); + return ret == LZO_E_OK ? 
0 : ret; +} + +struct zcomp_backend zcomp_lzo = { + .compress = lzo_compress, + .decompress = lzo_decompress, + .create = lzo_create, + .destroy = lzo_destroy, + .name = "lzo", +}; diff --git a/drivers/block/zram/zcomp_lzo.h b/drivers/block/zram/zcomp_lzo.h new file mode 100644 index 000000000000..128c5807fa14 --- /dev/null +++ b/drivers/block/zram/zcomp_lzo.h @@ -0,0 +1,17 @@ +/* + * Copyright (C) 2014 Sergey Senozhatsky. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _ZCOMP_LZO_H_ +#define _ZCOMP_LZO_H_ + +#include "zcomp.h" + +extern struct zcomp_backend zcomp_lzo; + +#endif /* _ZCOMP_LZO_H_ */ diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index a3abe6e89e4e..22680ab5140e 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -11,6 +11,11 @@ * Released under the terms of GNU General Public License Version 2.0 * */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2015 Sony Mobile Communications Inc, + * and licensed under the license of the file. + */ #define KMSG_COMPONENT "zram" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt @@ -25,13 +30,11 @@ #include #include #include -#include #include #include #include #include #include -#include #include "zram_drv.h" @@ -41,108 +44,86 @@ static DEFINE_MUTEX(zram_index_mutex); static int zram_major; static const char *default_compressor = "lzo"; +#define BACKEND_PARAM_BUF_SIZE 32 +static char backend_param_buf[BACKEND_PARAM_BUF_SIZE]; -/* Module params (documentation at end) */ -static unsigned int num_devices = 1; /* - * Pages that compress to sizes equals or greater than this are stored - * uncompressed in memory. 
+ * We don't need to see memory allocation errors more than once every 1 + * second to know that a problem is occurring. */ -static size_t huge_class_size; +#define ALLOC_ERROR_LOG_RATE_MS 1000 -static void zram_free_page(struct zram *zram, size_t index); -static void zram_slot_lock(struct zram *zram, u32 index) -{ - bit_spin_lock(ZRAM_LOCK, &zram->table[index].value); -} +/* Module params (documentation at end) */ +static unsigned int num_devices = 1; -static void zram_slot_unlock(struct zram *zram, u32 index) +static inline void deprecated_attr_warn(const char *name) { - bit_spin_unlock(ZRAM_LOCK, &zram->table[index].value); + pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n", + task_pid_nr(current), + current->comm, + name, + "See zram documentation."); } +#define ZRAM_ATTR_RO(name) \ +static ssize_t name##_show(struct device *d, \ + struct device_attribute *attr, char *b) \ +{ \ + struct zram *zram = dev_to_zram(d); \ + \ + deprecated_attr_warn(__stringify(name)); \ + return scnprintf(b, PAGE_SIZE, "%llu\n", \ + (u64)atomic64_read(&zram->stats.name)); \ +} \ +static DEVICE_ATTR_RO(name); + static inline bool init_done(struct zram *zram) { return zram->disksize; } -static inline bool zram_allocated(struct zram *zram, u32 index) -{ - - return (zram->table[index].value >> (ZRAM_FLAG_SHIFT + 1)) || - zram->table[index].handle; -} - static inline struct zram *dev_to_zram(struct device *dev) { return (struct zram *)dev_to_disk(dev)->private_data; } -static unsigned long zram_get_handle(struct zram *zram, u32 index) -{ - return zram->table[index].handle; -} - -static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle) -{ - zram->table[index].handle = handle; -} - /* flag operations require table entry bit_spin_lock() being held */ -static bool zram_test_flag(struct zram *zram, u32 index, +static int zram_test_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - return zram->table[index].value & BIT(flag); + 
return meta->table[index].value & BIT(flag); } -static void zram_set_flag(struct zram *zram, u32 index, +static void zram_set_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram->table[index].value |= BIT(flag); + meta->table[index].value |= BIT(flag); } -static void zram_clear_flag(struct zram *zram, u32 index, +static void zram_clear_flag(struct zram_meta *meta, u32 index, enum zram_pageflags flag) { - zram->table[index].value &= ~BIT(flag); -} - -static inline void zram_set_element(struct zram *zram, u32 index, - unsigned long element) -{ - zram->table[index].element = element; + meta->table[index].value &= ~BIT(flag); } -static unsigned long zram_get_element(struct zram *zram, u32 index) +static size_t zram_get_obj_size(struct zram_meta *meta, u32 index) { - return zram->table[index].element; + return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1); } -static size_t zram_get_obj_size(struct zram *zram, u32 index) -{ - return zram->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1); -} - -static void zram_set_obj_size(struct zram *zram, +static void zram_set_obj_size(struct zram_meta *meta, u32 index, size_t size) { - unsigned long flags = zram->table[index].value >> ZRAM_FLAG_SHIFT; + unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT; - zram->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size; + meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size; } -#if PAGE_SIZE != 4096 static inline bool is_partial_io(struct bio_vec *bvec) { return bvec->bv_len != PAGE_SIZE; } -#else -static inline bool is_partial_io(struct bio_vec *bvec) -{ - return false; -} -#endif /* * Check if request is within bounds and aligned on zram logical blocks. 
@@ -170,7 +151,8 @@ static inline bool valid_io_request(struct zram *zram, static void update_position(u32 *index, int *offset, struct bio_vec *bvec) { - *index += (*offset + bvec->bv_len) / PAGE_SIZE; + if (*offset + bvec->bv_len >= PAGE_SIZE) + (*index)++; *offset = (*offset + bvec->bv_len) % PAGE_SIZE; } @@ -189,41 +171,36 @@ static inline void update_used_max(struct zram *zram, } while (old_max != cur_max); } -static inline void zram_fill_page(char *ptr, unsigned long len, - unsigned long value) -{ - int i; - unsigned long *page = (unsigned long *)ptr; - - WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long))); - - if (likely(value == 0)) { - memset(ptr, 0, len); - } else { - for (i = 0; i < len / sizeof(*page); i++) - page[i] = value; - } -} - -static bool page_same_filled(void *ptr, unsigned long *element) +static bool page_zero_filled(void *ptr) { unsigned int pos; unsigned long *page; - unsigned long val; page = (unsigned long *)ptr; - val = page[0]; - for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) { - if (val != page[pos]) + for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) { + if (page[pos]) return false; } - *element = val; - return true; } +static void handle_zero_page(struct bio_vec *bvec) +{ + struct page *page = bvec->bv_page; + void *user_mem; + + user_mem = kmap_atomic(page); + if (is_partial_io(bvec)) + memset(user_mem + bvec->bv_offset, 0, bvec->bv_len); + else + clear_page(user_mem); + kunmap_atomic(user_mem); + + flush_dcache_page(page); +} + static ssize_t initstate_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -245,515 +222,101 @@ static ssize_t disksize_show(struct device *dev, return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize); } -static ssize_t mem_limit_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) +static ssize_t orig_data_size_show(struct device *dev, + struct device_attribute *attr, char *buf) { - u64 limit; - char *tmp; struct zram *zram = 
dev_to_zram(dev); - limit = memparse(buf, &tmp); - if (buf == tmp) /* no chars parsed, invalid input */ - return -EINVAL; - - down_write(&zram->init_lock); - zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT; - up_write(&zram->init_lock); - - return len; + deprecated_attr_warn("orig_data_size"); + return scnprintf(buf, PAGE_SIZE, "%llu\n", + (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT); } -static ssize_t mem_used_max_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) +static ssize_t mem_used_total_show(struct device *dev, + struct device_attribute *attr, char *buf) { - int err; - unsigned long val; + u64 val = 0; struct zram *zram = dev_to_zram(dev); - err = kstrtoul(buf, 10, &val); - if (err || val != 0) - return -EINVAL; - + deprecated_attr_warn("mem_used_total"); down_read(&zram->init_lock); if (init_done(zram)) { - atomic_long_set(&zram->stats.max_used_pages, - zs_get_total_pages(zram->mem_pool)); + struct zram_meta *meta = zram->meta; + val = zpool_get_total_size(meta->mem_pool); } up_read(&zram->init_lock); - return len; + return scnprintf(buf, PAGE_SIZE, "%llu\n", val); } -#ifdef CONFIG_ZRAM_WRITEBACK -static bool zram_wb_enabled(struct zram *zram) -{ - return zram->backing_dev; -} - -static void reset_bdev(struct zram *zram) -{ - struct block_device *bdev; - - if (!zram_wb_enabled(zram)) - return; - - bdev = zram->bdev; - if (zram->old_block_size) - set_blocksize(bdev, zram->old_block_size); - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); - /* hope filp_close flush all of IO */ - filp_close(zram->backing_dev, NULL); - zram->backing_dev = NULL; - zram->old_block_size = 0; - zram->bdev = NULL; - - kvfree(zram->bitmap); - zram->bitmap = NULL; -} - -static ssize_t backing_dev_show(struct device *dev, +static ssize_t mem_limit_show(struct device *dev, struct device_attribute *attr, char *buf) { + u64 val; struct zram *zram = dev_to_zram(dev); - struct file *file = zram->backing_dev; - char *p; - 
ssize_t ret; + deprecated_attr_warn("mem_limit"); down_read(&zram->init_lock); - if (!zram_wb_enabled(zram)) { - memcpy(buf, "none\n", 5); - up_read(&zram->init_lock); - return 5; - } - - p = file_path(file, buf, PAGE_SIZE - 1); - if (IS_ERR(p)) { - ret = PTR_ERR(p); - goto out; - } - - ret = strlen(p); - memmove(buf, p, ret); - buf[ret++] = '\n'; -out: + val = zram->limit_pages; up_read(&zram->init_lock); - return ret; + + return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT); } -static ssize_t backing_dev_store(struct device *dev, +static ssize_t mem_limit_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { - char *file_name; - size_t sz; - struct file *backing_dev = NULL; - struct inode *inode; - struct address_space *mapping; - unsigned int bitmap_sz, old_block_size = 0; - unsigned long nr_pages, *bitmap = NULL; - struct block_device *bdev = NULL; - int err; + u64 limit; + char *tmp; struct zram *zram = dev_to_zram(dev); - gfp_t kmalloc_flags; - file_name = kmalloc(PATH_MAX, GFP_KERNEL); - if (!file_name) - return -ENOMEM; + limit = memparse(buf, &tmp); + if (buf == tmp) /* no chars parsed, invalid input */ + return -EINVAL; down_write(&zram->init_lock); - if (init_done(zram)) { - pr_info("Can't setup backing device for initialized device\n"); - err = -EBUSY; - goto out; - } - - strlcpy(file_name, buf, PATH_MAX); - /* ignore trailing newline */ - sz = strlen(file_name); - if (sz > 0 && file_name[sz - 1] == '\n') - file_name[sz - 1] = 0x00; - - backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0); - if (IS_ERR(backing_dev)) { - err = PTR_ERR(backing_dev); - backing_dev = NULL; - goto out; - } - - mapping = backing_dev->f_mapping; - inode = mapping->host; - - /* Support only block device in this moment */ - if (!S_ISBLK(inode->i_mode)) { - err = -ENOTBLK; - goto out; - } - - bdev = bdgrab(I_BDEV(inode)); - err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram); - if (err < 0) - goto out; - - nr_pages = 
i_size_read(inode) >> PAGE_SHIFT; - bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long); - kmalloc_flags = GFP_KERNEL | __GFP_ZERO; - if (bitmap_sz > PAGE_SIZE) - kmalloc_flags |= __GFP_NOWARN | __GFP_NORETRY; - - bitmap = kmalloc_node(bitmap_sz, kmalloc_flags, NUMA_NO_NODE); - if (!bitmap && bitmap_sz > PAGE_SIZE) - bitmap = vzalloc(bitmap_sz); - - if (!bitmap) { - err = -ENOMEM; - goto out; - } - - old_block_size = block_size(bdev); - err = set_blocksize(bdev, PAGE_SIZE); - if (err) - goto out; - - reset_bdev(zram); - spin_lock_init(&zram->bitmap_lock); - - zram->old_block_size = old_block_size; - zram->bdev = bdev; - zram->backing_dev = backing_dev; - zram->bitmap = bitmap; - zram->nr_pages = nr_pages; + zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT; up_write(&zram->init_lock); - pr_info("setup backing device %s\n", file_name); - kfree(file_name); - return len; -out: - if (bitmap) - kvfree(bitmap); - - if (bdev) - blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); - - if (backing_dev) - filp_close(backing_dev, NULL); - - up_write(&zram->init_lock); - - kfree(file_name); - - return err; -} - -static unsigned long get_entry_bdev(struct zram *zram) -{ - unsigned long entry; - - spin_lock(&zram->bitmap_lock); - /* skip 0 bit to confuse zram.handle = 0 */ - entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1); - if (entry == zram->nr_pages) { - spin_unlock(&zram->bitmap_lock); - return 0; - } - - set_bit(entry, zram->bitmap); - spin_unlock(&zram->bitmap_lock); - - return entry; -} - -static void put_entry_bdev(struct zram *zram, unsigned long entry) -{ - int was_set; - - spin_lock(&zram->bitmap_lock); - was_set = test_and_clear_bit(entry, zram->bitmap); - spin_unlock(&zram->bitmap_lock); - WARN_ON_ONCE(!was_set); } -static void zram_page_end_io(struct bio *bio) -{ - struct page *page = bio->bi_io_vec[0].bv_page; - - page_endio(page, bio_data_dir(bio), bio->bi_error); - bio_put(bio); -} - -/* - * Returns 1 if the submission is successful. 
- */ -static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec, - unsigned long entry, struct bio *parent) -{ - struct bio *bio; - - bio = bio_alloc(GFP_ATOMIC, 1); - if (!bio) - return -ENOMEM; - - bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9); - bio->bi_bdev = zram->bdev; - if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) { - bio_put(bio); - return -EIO; - } - - if (!parent) { - bio->bi_rw = 0; - bio->bi_end_io = zram_page_end_io; - } else { - bio->bi_rw = parent->bi_rw; - bio_chain(bio, parent); - } - - submit_bio(READ, bio); - return 1; -} - -struct zram_work { - struct work_struct work; - struct zram *zram; - unsigned long entry; - struct bio *bio; -}; - -#if PAGE_SIZE != 4096 -static void zram_sync_read(struct work_struct *work) -{ - struct bio_vec bvec; - struct zram_work *zw = container_of(work, struct zram_work, work); - struct zram *zram = zw->zram; - unsigned long entry = zw->entry; - struct bio *bio = zw->bio; - - read_from_bdev_async(zram, &bvec, entry, bio); -} - -/* - * Block layer want one ->make_request_fn to be active at a time - * so if we use chained IO with parent IO in same context, - * it's a deadlock. To avoid, it, it uses worker thread context. 
- */ -static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, - unsigned long entry, struct bio *bio) -{ - struct zram_work work; - - work.zram = zram; - work.entry = entry; - work.bio = bio; - - INIT_WORK_ONSTACK(&work.work, zram_sync_read); - queue_work(system_unbound_wq, &work.work); - flush_work(&work.work); - destroy_work_on_stack(&work.work); - - return 1; -} -#else -static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, - unsigned long entry, struct bio *bio) -{ - WARN_ON(1); - return -EIO; -} -#endif - -static int read_from_bdev(struct zram *zram, struct bio_vec *bvec, - unsigned long entry, struct bio *parent, bool sync) -{ - if (sync) - return read_from_bdev_sync(zram, bvec, entry, parent); - else - return read_from_bdev_async(zram, bvec, entry, parent); -} - -static int write_to_bdev(struct zram *zram, struct bio_vec *bvec, - u32 index, struct bio *parent, - unsigned long *pentry) -{ - struct bio *bio; - unsigned long entry; - - bio = bio_alloc(GFP_ATOMIC, 1); - if (!bio) - return -ENOMEM; - - entry = get_entry_bdev(zram); - if (!entry) { - bio_put(bio); - return -ENOSPC; - } - - bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9); - bio->bi_bdev = zram->bdev; - if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, - bvec->bv_offset)) { - bio_put(bio); - put_entry_bdev(zram, entry); - return -EIO; - } - - if (!parent) { - bio->bi_rw = REQ_WRITE | REQ_SYNC; - bio->bi_end_io = zram_page_end_io; - } else { - bio->bi_rw = parent->bi_rw; - bio_chain(bio, parent); - } - - submit_bio(WRITE, bio); - *pentry = entry; - - return 0; -} - -static void zram_wb_clear(struct zram *zram, u32 index) -{ - unsigned long entry; - - zram_clear_flag(zram, index, ZRAM_WB); - entry = zram_get_element(zram, index); - zram_set_element(zram, index, 0); - put_entry_bdev(zram, entry); -} - -#else -static bool zram_wb_enabled(struct zram *zram) { return false; } -static inline void reset_bdev(struct zram *zram) {}; -static int write_to_bdev(struct zram 
*zram, struct bio_vec *bvec, - u32 index, struct bio *parent, - unsigned long *pentry) - -{ - return -EIO; -} - -static int read_from_bdev(struct zram *zram, struct bio_vec *bvec, - unsigned long entry, struct bio *parent, bool sync) -{ - return -EIO; -} -static void zram_wb_clear(struct zram *zram, u32 index) {} -#endif - -#ifdef CONFIG_ZRAM_MEMORY_TRACKING - -static struct dentry *zram_debugfs_root; - -static void zram_debugfs_create(void) -{ - zram_debugfs_root = debugfs_create_dir("zram", NULL); -} - -static void zram_debugfs_destroy(void) +static ssize_t mem_used_max_show(struct device *dev, + struct device_attribute *attr, char *buf) { - debugfs_remove_recursive(zram_debugfs_root); -} + u64 val = 0; + struct zram *zram = dev_to_zram(dev); -static void zram_accessed(struct zram *zram, u32 index) -{ - zram->table[index].ac_time = ktime_get_boottime(); -} + deprecated_attr_warn("mem_used_max"); + down_read(&zram->init_lock); + if (init_done(zram)) + val = atomic_long_read(&zram->stats.max_used_pages); + up_read(&zram->init_lock); -static void zram_reset_access(struct zram *zram, u32 index) -{ - zram->table[index].ac_time.tv64 = 0; + return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT); } -static ssize_t read_block_state(struct file *file, char __user *buf, - size_t count, loff_t *ppos) +static ssize_t mem_used_max_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) { - char *kbuf; - ssize_t index, written = 0; - struct zram *zram = file->private_data; - unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; - struct timespec64 ts; - gfp_t kmalloc_flags; - - kmalloc_flags = GFP_KERNEL; - if (count > PAGE_SIZE) - kmalloc_flags |= __GFP_NOWARN | __GFP_NORETRY; - - kbuf = kmalloc_node(count, kmalloc_flags, NUMA_NO_NODE); - if (!kbuf && count > PAGE_SIZE) - kbuf = vmalloc(count); - - if (!kbuf) - return -ENOMEM; + int err; + unsigned long val; + struct zram *zram = dev_to_zram(dev); - down_read(&zram->init_lock); - if 
(!init_done(zram)) { - up_read(&zram->init_lock); - kvfree(kbuf); + err = kstrtoul(buf, 10, &val); + if (err || val != 0) return -EINVAL; - } - for (index = *ppos; index < nr_pages; index++) { - int copied; - - zram_slot_lock(zram, index); - if (!zram_allocated(zram, index)) - goto next; - - ts = ktime_to_timespec64(zram->table[index].ac_time); - copied = snprintf(kbuf + written, count, - "%12zd %12lld.%06lu %c%c%c\n", - index, (s64)ts.tv_sec, - ts.tv_nsec / NSEC_PER_USEC, - zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.', - zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.', - zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.'); - - if (count < copied) { - zram_slot_unlock(zram, index); - break; - } - written += copied; - count -= copied; -next: - zram_slot_unlock(zram, index); - *ppos += 1; + down_read(&zram->init_lock); + if (init_done(zram)) { + struct zram_meta *meta = zram->meta; + atomic_long_set(&zram->stats.max_used_pages, + zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT); } - up_read(&zram->init_lock); - if (copy_to_user(buf, kbuf, written)) - written = -EFAULT; - kvfree(kbuf); - - return written; -} -static const struct file_operations proc_zram_block_state_op = { - .open = simple_open, - .read = read_block_state, - .llseek = default_llseek, -}; - -static void zram_debugfs_register(struct zram *zram) -{ - if (!zram_debugfs_root) - return; - - zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name, - zram_debugfs_root); - debugfs_create_file("block_state", 0400, zram->debugfs_dir, - zram, &proc_zram_block_state_op); -} - -static void zram_debugfs_unregister(struct zram *zram) -{ - debugfs_remove_recursive(zram->debugfs_dir); + return len; } -#else -static void zram_debugfs_create(void) {}; -static void zram_debugfs_destroy(void) {}; -static void zram_accessed(struct zram *zram, u32 index) {}; -static void zram_reset_access(struct zram *zram, u32 index) {}; -static void zram_debugfs_register(struct zram *zram) {}; -static void 
zram_debugfs_unregister(struct zram *zram) {}; -#endif /* * We switched to per-cpu streams and this attr is not needed anymore. @@ -793,16 +356,9 @@ static ssize_t comp_algorithm_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct zram *zram = dev_to_zram(dev); - char compressor[ARRAY_SIZE(zram->compressor)]; size_t sz; - strlcpy(compressor, buf, sizeof(compressor)); - /* ignore trailing newline */ - sz = strlen(compressor); - if (sz > 0 && compressor[sz - 1] == '\n') - compressor[sz - 1] = 0x00; - - if (!zcomp_available_algorithm(compressor)) + if (!zcomp_available_algorithm(buf)) return -EINVAL; down_write(&zram->init_lock); @@ -811,8 +367,13 @@ static ssize_t comp_algorithm_store(struct device *dev, pr_info("Can't change algorithm for initialized device\n"); return -EBUSY; } + strlcpy(zram->compressor, buf, sizeof(zram->compressor)); + + /* ignore trailing newline */ + sz = strlen(zram->compressor); + if (sz > 0 && zram->compressor[sz - 1] == '\n') + zram->compressor[sz - 1] = 0x00; - strcpy(zram->compressor, compressor); up_write(&zram->init_lock); return len; } @@ -821,6 +382,7 @@ static ssize_t compact_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct zram *zram = dev_to_zram(dev); + struct zram_meta *meta; down_read(&zram->init_lock); if (!init_done(zram)) { @@ -828,7 +390,8 @@ static ssize_t compact_store(struct device *dev, return -EINVAL; } - zs_compact(zram->mem_pool); + meta = zram->meta; + zpool_compact(meta->mem_pool); up_read(&zram->init_lock); return len; @@ -856,49 +419,26 @@ static ssize_t mm_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { struct zram *zram = dev_to_zram(dev); - struct zs_pool_stats pool_stats; u64 orig_size, mem_used = 0; long max_used; ssize_t ret; - memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats)); - down_read(&zram->init_lock); - if (init_done(zram)) { - mem_used = zs_get_total_pages(zram->mem_pool); - 
zs_pool_stats(zram->mem_pool, &pool_stats); - } + if (init_done(zram)) + mem_used = zpool_get_total_size(zram->meta->mem_pool); orig_size = atomic64_read(&zram->stats.pages_stored); max_used = atomic_long_read(&zram->stats.max_used_pages); ret = scnprintf(buf, PAGE_SIZE, - "%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n", + "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n", orig_size << PAGE_SHIFT, (u64)atomic64_read(&zram->stats.compr_data_size), - mem_used << PAGE_SHIFT, + mem_used, zram->limit_pages << PAGE_SHIFT, max_used << PAGE_SHIFT, - (u64)atomic64_read(&zram->stats.same_pages), - pool_stats.pages_compacted, - (u64)atomic64_read(&zram->stats.huge_pages)); - up_read(&zram->init_lock); - - return ret; -} - -static ssize_t debug_stat_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - int version = 1; - struct zram *zram = dev_to_zram(dev); - ssize_t ret; - - down_read(&zram->init_lock); - ret = scnprintf(buf, PAGE_SIZE, - "version: %d\n%8llu\n", - version, - (u64)atomic64_read(&zram->stats.writestall)); + (u64)atomic64_read(&zram->stats.zero_pages), + zpool_get_num_compacted(zram->meta->mem_pool)); up_read(&zram->init_lock); return ret; @@ -906,39 +446,77 @@ static ssize_t debug_stat_show(struct device *dev, static DEVICE_ATTR_RO(io_stat); static DEVICE_ATTR_RO(mm_stat); -static DEVICE_ATTR_RO(debug_stat); +ZRAM_ATTR_RO(num_reads); +ZRAM_ATTR_RO(num_writes); +ZRAM_ATTR_RO(failed_reads); +ZRAM_ATTR_RO(failed_writes); +ZRAM_ATTR_RO(invalid_io); +ZRAM_ATTR_RO(notify_free); +ZRAM_ATTR_RO(zero_pages); +ZRAM_ATTR_RO(compr_data_size); + +static inline bool zram_meta_get(struct zram *zram) +{ + if (atomic_inc_not_zero(&zram->refcount)) + return true; + return false; +} + +static inline void zram_meta_put(struct zram *zram) +{ + atomic_dec(&zram->refcount); +} -static void zram_meta_free(struct zram *zram, u64 disksize) +static void zram_meta_free(struct zram_meta *meta, u64 disksize) { size_t num_pages = disksize >> PAGE_SHIFT; size_t index; /* Free 
all pages that are still in this zram device */ - for (index = 0; index < num_pages; index++) - zram_free_page(zram, index); + for (index = 0; index < num_pages; index++) { + unsigned long handle = meta->table[index].handle; + + if (!handle) + continue; + + zpool_free(meta->mem_pool, handle); + } - zs_destroy_pool(zram->mem_pool); - vfree(zram->table); + zpool_destroy_pool(meta->mem_pool); + vfree(meta->table); + kfree(meta); } -static bool zram_meta_alloc(struct zram *zram, u64 disksize) +static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize) { size_t num_pages; + struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL); + char *backend; + + if (!meta) + return NULL; num_pages = disksize >> PAGE_SHIFT; - zram->table = vzalloc(num_pages * sizeof(*zram->table)); - if (!zram->table) - return false; + meta->table = vzalloc(num_pages * sizeof(*meta->table)); + if (!meta->table) { + pr_err("Error allocating zram address table\n"); + goto out_error; + } - zram->mem_pool = zs_create_pool(zram->disk->disk_name); - if (!zram->mem_pool) { - vfree(zram->table); - return false; + backend = strlen(backend_param_buf) ? 
backend_param_buf : "zsmalloc"; + meta->mem_pool = zpool_create_pool(backend, pool_name, + GFP_NOIO, NULL); + if (!meta->mem_pool) { + pr_err("Error creating memory pool\n"); + goto out_error; } - if (!huge_class_size) - huge_class_size = zs_huge_class_size(zram->mem_pool); - return true; + return meta; + +out_error: + vfree(meta->table); + kfree(meta); + return NULL; } /* @@ -948,195 +526,187 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) */ static void zram_free_page(struct zram *zram, size_t index) { - unsigned long handle; - - zram_reset_access(zram, index); + struct zram_meta *meta = zram->meta; + unsigned long handle = meta->table[index].handle; - if (zram_test_flag(zram, index, ZRAM_HUGE)) { - zram_clear_flag(zram, index, ZRAM_HUGE); - atomic64_dec(&zram->stats.huge_pages); - } - - if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) { - zram_wb_clear(zram, index); - atomic64_dec(&zram->stats.pages_stored); - return; - } - - /* - * No memory is allocated for same element filled pages. - * Simply clear same page flag. - */ - if (zram_test_flag(zram, index, ZRAM_SAME)) { - zram_clear_flag(zram, index, ZRAM_SAME); - zram_set_element(zram, index, 0); - atomic64_dec(&zram->stats.same_pages); - atomic64_dec(&zram->stats.pages_stored); + if (unlikely(!handle)) { + /* + * No memory is allocated for zero filled pages. + * Simply clear zero page flag. 
+ */ + if (zram_test_flag(meta, index, ZRAM_ZERO)) { + zram_clear_flag(meta, index, ZRAM_ZERO); + atomic64_dec(&zram->stats.zero_pages); + } return; } - handle = zram_get_handle(zram, index); - if (!handle) - return; - - zs_free(zram->mem_pool, handle); + zpool_free(meta->mem_pool, handle); - atomic64_sub(zram_get_obj_size(zram, index), + atomic64_sub(zram_get_obj_size(meta, index), &zram->stats.compr_data_size); atomic64_dec(&zram->stats.pages_stored); - zram_set_handle(zram, index, 0); - zram_set_obj_size(zram, index, 0); + meta->table[index].handle = 0; + zram_set_obj_size(meta, index, 0); } -static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, - struct bio *bio, bool partial_io) +static int zram_decompress_page(struct zram *zram, char *mem, u32 index) { - int ret; + int ret = 0; + unsigned char *cmem; + struct zram_meta *meta = zram->meta; unsigned long handle; - unsigned int size; - void *src, *dst; - - if (zram_wb_enabled(zram)) { - zram_slot_lock(zram, index); - if (zram_test_flag(zram, index, ZRAM_WB)) { - struct bio_vec bvec; - - zram_slot_unlock(zram, index); - - bvec.bv_page = page; - bvec.bv_len = PAGE_SIZE; - bvec.bv_offset = 0; - return read_from_bdev(zram, &bvec, - zram_get_element(zram, index), - bio, partial_io); - } - zram_slot_unlock(zram, index); - } + size_t size; + + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); + handle = meta->table[index].handle; + size = zram_get_obj_size(meta, index); - zram_slot_lock(zram, index); - handle = zram_get_handle(zram, index); - if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) { - unsigned long value; - void *mem; - - value = handle ? 
zram_get_element(zram, index) : 0; - mem = kmap_atomic(page); - zram_fill_page(mem, PAGE_SIZE, value); - kunmap_atomic(mem); - zram_slot_unlock(zram, index); + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); + memset(mem, 0, PAGE_SIZE); return 0; } - size = zram_get_obj_size(zram, index); - - src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); - if (size == PAGE_SIZE) { - dst = kmap_atomic(page); - memcpy(dst, src, PAGE_SIZE); - kunmap_atomic(dst); - ret = 0; - } else { - struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); - - dst = kmap_atomic(page); - ret = zcomp_decompress(zstrm, src, size, dst); - kunmap_atomic(dst); - zcomp_stream_put(zram->comp); - } - zs_unmap_object(zram->mem_pool, handle); - zram_slot_unlock(zram, index); + cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_RO); + if (size == PAGE_SIZE) + memcpy(mem, cmem, PAGE_SIZE); + else + ret = zcomp_decompress(zram->comp, cmem, size, mem); + zpool_unmap_handle(meta->mem_pool, handle); + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); /* Should NEVER happen. Return bio error if it does. */ - if (unlikely(ret)) + if (unlikely(ret)) { pr_err("Decompression failed! 
err=%d, page=%u\n", ret, index); + return ret; + } - return ret; + return 0; } static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, - u32 index, int offset, struct bio *bio) + u32 index, int offset) { int ret; struct page *page; - + unsigned char *user_mem, *uncmem = NULL; + struct zram_meta *meta = zram->meta; page = bvec->bv_page; - if (is_partial_io(bvec)) { - /* Use a temporary buffer to decompress the page */ - page = alloc_page(GFP_NOIO|__GFP_HIGHMEM); - if (!page) - return -ENOMEM; + + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); + if (unlikely(!meta->table[index].handle) || + zram_test_flag(meta, index, ZRAM_ZERO)) { + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); + handle_zero_page(bvec); + return 0; } + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); - ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec)); - if (unlikely(ret)) - goto out; + if (is_partial_io(bvec)) + /* Use a temporary buffer to decompress the page */ + uncmem = kmalloc(PAGE_SIZE, GFP_NOIO); - if (is_partial_io(bvec)) { - void *dst = kmap_atomic(bvec->bv_page); - void *src = kmap_atomic(page); + user_mem = kmap_atomic(page); + if (!is_partial_io(bvec)) + uncmem = user_mem; - memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len); - kunmap_atomic(src); - kunmap_atomic(dst); + if (!uncmem) { + pr_err("Unable to allocate temp memory\n"); + ret = -ENOMEM; + goto out_cleanup; } -out: + + ret = zram_decompress_page(zram, uncmem, index); + /* Should NEVER happen. Return bio error if it does. 
*/ + if (unlikely(ret)) + goto out_cleanup; + if (is_partial_io(bvec)) - __free_page(page); + memcpy(user_mem + bvec->bv_offset, uncmem + offset, + bvec->bv_len); + flush_dcache_page(page); + ret = 0; +out_cleanup: + kunmap_atomic(user_mem); + if (is_partial_io(bvec)) + kfree(uncmem); return ret; } -static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, - u32 index, struct bio *bio) +static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, + int offset) { int ret = 0; - unsigned long alloced_pages; + size_t clen; unsigned long handle = 0; - unsigned int comp_len = 0; - void *src, *dst, *mem; - struct zcomp_strm *zstrm; - struct page *page = bvec->bv_page; - unsigned long element = 0; - enum zram_pageflags flags = 0; - bool allow_wb = true; + struct page *page; + unsigned char *user_mem, *cmem, *src, *uncmem = NULL; + struct zram_meta *meta = zram->meta; + struct zcomp_strm *zstrm = NULL; + unsigned long alloced_pages; + + page = bvec->bv_page; + if (is_partial_io(bvec)) { + /* + * This is a partial IO. We need to read the full page + * before to write the changes. + */ + uncmem = kmalloc(PAGE_SIZE, GFP_NOIO); + if (!uncmem) { + ret = -ENOMEM; + goto out; + } + ret = zram_decompress_page(zram, uncmem, index); + if (ret) + goto out; + } - mem = kmap_atomic(page); - if (page_same_filled(mem, &element)) { - kunmap_atomic(mem); +compress_again: + user_mem = kmap_atomic(page); + if (is_partial_io(bvec)) { + memcpy(uncmem + offset, user_mem + bvec->bv_offset, + bvec->bv_len); + kunmap_atomic(user_mem); + user_mem = NULL; + } else { + uncmem = user_mem; + } + + if (page_zero_filled(uncmem)) { + if (user_mem) + kunmap_atomic(user_mem); /* Free memory associated with this sector now. 
*/ - flags = ZRAM_SAME; - atomic64_inc(&zram->stats.same_pages); + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); + zram_free_page(zram, index); + zram_set_flag(meta, index, ZRAM_ZERO); + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); + + atomic64_inc(&zram->stats.zero_pages); + ret = 0; goto out; } - kunmap_atomic(mem); -compress_again: - zstrm = zcomp_stream_get(zram->comp); - src = kmap_atomic(page); - ret = zcomp_compress(zstrm, src, &comp_len); - kunmap_atomic(src); + zstrm = zcomp_strm_find(zram->comp); + ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen); + if (!is_partial_io(bvec)) { + kunmap_atomic(user_mem); + user_mem = NULL; + uncmem = NULL; + } if (unlikely(ret)) { - zcomp_stream_put(zram->comp); pr_err("Compression failed! err=%d\n", ret); - zs_free(zram->mem_pool, handle); - return ret; + goto out; } - if (unlikely(comp_len >= huge_class_size)) { - comp_len = PAGE_SIZE; - if (zram_wb_enabled(zram) && allow_wb) { - zcomp_stream_put(zram->comp); - ret = write_to_bdev(zram, bvec, index, bio, &element); - if (!ret) { - flags = ZRAM_WB; - ret = 1; - goto out; - } - allow_wb = false; - goto compress_again; - } + src = zstrm->buffer; + if (unlikely(clen > max_zpage_size)) { + clen = PAGE_SIZE; + if (is_partial_io(bvec)) + src = uncmem; } /* @@ -1153,108 +723,64 @@ compress_again: * from the slow path and handle has already been allocated. 
*/ if (!handle) - handle = zs_malloc(zram->mem_pool, comp_len, - __GFP_KSWAPD_RECLAIM | - __GFP_NOWARN | - __GFP_HIGHMEM | - __GFP_MOVABLE); - if (!handle) { - zcomp_stream_put(zram->comp); - atomic64_inc(&zram->stats.writestall); - handle = zs_malloc(zram->mem_pool, comp_len, - GFP_NOIO | __GFP_HIGHMEM | - __GFP_MOVABLE); - if (handle) + ret = zpool_malloc(meta->mem_pool, clen, + __GFP_KSWAPD_RECLAIM | __GFP_NOWARN, &handle); + if (ret < 0) { + zcomp_strm_release(zram->comp, zstrm); + zstrm = NULL; + + ret = zpool_malloc(meta->mem_pool, clen, + GFP_NOIO, &handle); + if (ret == 0) goto compress_again; - return -ENOMEM; + + pr_err("Error allocating memory for compressed page: %u, size=%zu\n", + index, clen); + ret = -ENOMEM; + goto out; } - alloced_pages = zs_get_total_pages(zram->mem_pool); + alloced_pages = zpool_get_total_size(meta->mem_pool) >> PAGE_SHIFT; update_used_max(zram, alloced_pages); - if (zram->limit_pages && alloced_pages > zram->limit_pages) { - zcomp_stream_put(zram->comp); - zs_free(zram->mem_pool, handle); - return -ENOMEM; + zpool_free(meta->mem_pool, handle); + ret = -ENOMEM; + goto out; } - dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); + cmem = zpool_map_handle(meta->mem_pool, handle, ZPOOL_MM_WO); - src = zstrm->buffer; - if (comp_len == PAGE_SIZE) + if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) { src = kmap_atomic(page); - memcpy(dst, src, comp_len); - if (comp_len == PAGE_SIZE) + memcpy(cmem, src, PAGE_SIZE); kunmap_atomic(src); + } else { + memcpy(cmem, src, clen); + } + + zcomp_strm_release(zram->comp, zstrm); + zstrm = NULL; + zpool_unmap_handle(meta->mem_pool, handle); - zcomp_stream_put(zram->comp); - zs_unmap_object(zram->mem_pool, handle); - atomic64_add(comp_len, &zram->stats.compr_data_size); -out: /* * Free memory associated with this sector * before overwriting unused sectors. 
*/ - zram_slot_lock(zram, index); + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); zram_free_page(zram, index); - if (comp_len == PAGE_SIZE) { - zram_set_flag(zram, index, ZRAM_HUGE); - atomic64_inc(&zram->stats.huge_pages); - } - - if (flags) { - zram_set_flag(zram, index, flags); - zram_set_element(zram, index, element); - } else { - zram_set_handle(zram, index, handle); - zram_set_obj_size(zram, index, comp_len); - } - zram_slot_unlock(zram, index); + meta->table[index].handle = handle; + zram_set_obj_size(meta, index, clen); + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); /* Update stats */ + atomic64_add(clen, &zram->stats.compr_data_size); atomic64_inc(&zram->stats.pages_stored); - return ret; -} - -static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, - u32 index, int offset, struct bio *bio) -{ - int ret; - struct page *page = NULL; - void *src; - struct bio_vec vec; - - vec = *bvec; - if (is_partial_io(bvec)) { - void *dst; - /* - * This is a partial IO. We need to read the full page - * before to write the changes. - */ - page = alloc_page(GFP_NOIO|__GFP_HIGHMEM); - if (!page) - return -ENOMEM; - - ret = __zram_bvec_read(zram, page, index, bio, true); - if (ret) - goto out; - - src = kmap_atomic(bvec->bv_page); - dst = kmap_atomic(page); - memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len); - kunmap_atomic(dst); - kunmap_atomic(src); - - vec.bv_page = page; - vec.bv_len = PAGE_SIZE; - vec.bv_offset = 0; - } - - ret = __zram_bvec_write(zram, &vec, index, bio); out: + if (zstrm) + zcomp_strm_release(zram->comp, zstrm); if (is_partial_io(bvec)) - __free_page(page); + kfree(uncmem); return ret; } @@ -1267,6 +793,7 @@ static void zram_bio_discard(struct zram *zram, u32 index, int offset, struct bio *bio) { size_t n = bio->bi_iter.bi_size; + struct zram_meta *meta = zram->meta; /* * zram manages data in physical block size units. 
Because logical block @@ -1287,22 +814,17 @@ static void zram_bio_discard(struct zram *zram, u32 index, } while (n >= PAGE_SIZE) { - zram_slot_lock(zram, index); + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); zram_free_page(zram, index); - zram_slot_unlock(zram, index); + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); atomic64_inc(&zram->stats.notify_free); index++; n -= PAGE_SIZE; } } -/* - * Returns errno if it has some problem. Otherwise return 0 or 1. - * Returns 0 if IO request was done synchronously - * Returns 1 if IO request was successfully submitted. - */ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, - int offset, int rw, struct bio *bio) + int offset, int rw) { unsigned long start_time = jiffies; int ret; @@ -1312,20 +834,15 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, if (rw == READ) { atomic64_inc(&zram->stats.num_reads); - ret = zram_bvec_read(zram, bvec, index, offset, bio); - flush_dcache_page(bvec->bv_page); + ret = zram_bvec_read(zram, bvec, index, offset); } else { atomic64_inc(&zram->stats.num_writes); - ret = zram_bvec_write(zram, bvec, index, offset, bio); + ret = zram_bvec_write(zram, bvec, index, offset); } generic_end_io_acct(rw, &zram->disk->part0, start_time); - zram_slot_lock(zram, index); - zram_accessed(zram, index); - zram_slot_unlock(zram, index); - - if (unlikely(ret < 0)) { + if (unlikely(ret)) { if (rw == READ) atomic64_inc(&zram->stats.failed_reads); else @@ -1354,20 +871,31 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) rw = bio_data_dir(bio); bio_for_each_segment(bvec, bio, iter) { - struct bio_vec bv = bvec; - unsigned int unwritten = bvec.bv_len; + int max_transfer_size = PAGE_SIZE - offset; + + if (bvec.bv_len > max_transfer_size) { + /* + * zram_bvec_rw() can only make operation on a single + * zram page. Split the bio vector. 
+ */ + struct bio_vec bv; - do { - bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset, - unwritten); - if (zram_bvec_rw(zram, &bv, index, offset, rw, bio) < 0) + bv.bv_page = bvec.bv_page; + bv.bv_len = max_transfer_size; + bv.bv_offset = bvec.bv_offset; + + if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0) goto out; - bv.bv_offset += bv.bv_len; - unwritten -= bv.bv_len; + bv.bv_len = bvec.bv_len - max_transfer_size; + bv.bv_offset += max_transfer_size; + if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0) + goto out; + } else + if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0) + goto out; - update_position(&index, &offset, &bv); - } while (unwritten); + update_position(&index, &offset, &bvec); } bio_endio(bio); @@ -1384,15 +912,22 @@ static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio) { struct zram *zram = queue->queuedata; + if (unlikely(!zram_meta_get(zram))) + goto error; + + blk_queue_split(queue, &bio, queue->bio_split); + if (!valid_io_request(zram, bio->bi_iter.bi_sector, bio->bi_iter.bi_size)) { atomic64_inc(&zram->stats.invalid_io); - goto error; + goto put_zram; } __zram_make_request(zram, bio); + zram_meta_put(zram); return BLK_QC_T_NONE; - +put_zram: + zram_meta_put(zram); error: bio_io_error(bio); return BLK_QC_T_NONE; @@ -1402,39 +937,45 @@ static void zram_slot_free_notify(struct block_device *bdev, unsigned long index) { struct zram *zram; + struct zram_meta *meta; zram = bdev->bd_disk->private_data; + meta = zram->meta; - zram_slot_lock(zram, index); + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); zram_free_page(zram, index); - zram_slot_unlock(zram, index); + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); atomic64_inc(&zram->stats.notify_free); } static int zram_rw_page(struct block_device *bdev, sector_t sector, struct page *page, int rw) { - int offset, ret; + int offset, err = -EIO; u32 index; struct zram *zram; struct bio_vec bv; zram = bdev->bd_disk->private_data; + if 
(unlikely(!zram_meta_get(zram))) + goto out; if (!valid_io_request(zram, sector, PAGE_SIZE)) { atomic64_inc(&zram->stats.invalid_io); - ret = -EINVAL; - goto out; + err = -EINVAL; + goto put_zram; } index = sector >> SECTORS_PER_PAGE_SHIFT; - offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; + offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT; bv.bv_page = page; bv.bv_len = PAGE_SIZE; bv.bv_offset = 0; - ret = zram_bvec_rw(zram, &bv, index, offset, rw, NULL); + err = zram_bvec_rw(zram, &bv, index, offset, rw); +put_zram: + zram_meta_put(zram); out: /* * If I/O fails, just return error(ie, non-zero) without @@ -1444,24 +985,14 @@ out: * bio->bi_end_io does things to handle the error * (e.g., SetPageError, set_page_dirty and extra works). */ - if (unlikely(ret < 0)) - return ret; - - switch (ret) { - case 0: + if (err == 0) page_endio(page, rw, 0); - break; - case 1: - ret = 0; - break; - default: - WARN_ON(1); - } - return ret; + return err; } static void zram_reset_device(struct zram *zram) { + struct zram_meta *meta; struct zcomp *comp; u64 disksize; @@ -1474,8 +1005,23 @@ static void zram_reset_device(struct zram *zram) return; } + meta = zram->meta; comp = zram->comp; disksize = zram->disksize; + /* + * Refcount will go down to 0 eventually and r/w handler + * cannot handle further I/O so it will bail out by + * check zram_meta_get. + */ + zram_meta_put(zram); + /* + * We want to free zram_meta in process context to avoid + * deadlock between reclaim path and any other locks. 
+ */ + wait_event(zram->io_done, atomic_read(&zram->refcount) == 0); + + /* Reset stats */ + memset(&zram->stats, 0, sizeof(zram->stats)); zram->disksize = 0; set_capacity(zram->disk, 0); @@ -1483,10 +1029,8 @@ static void zram_reset_device(struct zram *zram) up_write(&zram->init_lock); /* I/O operation under all of CPU are done so let's free */ - zram_meta_free(zram, disksize); - memset(&zram->stats, 0, sizeof(zram->stats)); + zram_meta_free(meta, disksize); zcomp_destroy(comp); - reset_bdev(zram); } static ssize_t disksize_store(struct device *dev, @@ -1494,6 +1038,7 @@ static ssize_t disksize_store(struct device *dev, { u64 disksize; struct zcomp *comp; + struct zram_meta *meta; struct zram *zram = dev_to_zram(dev); int err; @@ -1501,18 +1046,10 @@ static ssize_t disksize_store(struct device *dev, if (!disksize) return -EINVAL; - down_write(&zram->init_lock); - if (init_done(zram)) { - pr_info("Cannot change disksize for initialized device\n"); - err = -EBUSY; - goto out_unlock; - } - disksize = PAGE_ALIGN(disksize); - if (!zram_meta_alloc(zram, disksize)) { - err = -ENOMEM; - goto out_unlock; - } + meta = zram_meta_alloc(zram->disk->disk_name, disksize); + if (!meta) + return -ENOMEM; comp = zcomp_create(zram->compressor); if (IS_ERR(comp)) { @@ -1522,19 +1059,35 @@ static ssize_t disksize_store(struct device *dev, goto out_free_meta; } + down_write(&zram->init_lock); + if (init_done(zram)) { + pr_info("Cannot change disksize for initialized device\n"); + err = -EBUSY; + goto out_destroy_comp; + } + + init_waitqueue_head(&zram->io_done); + atomic_set(&zram->refcount, 1); + zram->meta = meta; zram->comp = comp; zram->disksize = disksize; set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); + up_write(&zram->init_lock); + /* + * Revalidate disk out of the init_lock to avoid lockdep splat. + * It's okay because disk's capacity is protected by init_lock + * so that revalidate_disk always sees up-to-date capacity. 
+ */ revalidate_disk(zram->disk); - up_write(&zram->init_lock); return len; -out_free_meta: - zram_meta_free(zram, disksize); -out_unlock: +out_destroy_comp: up_write(&zram->init_lock); + zcomp_destroy(comp); +out_free_meta: + zram_meta_free(meta, disksize); return err; } @@ -1609,41 +1162,41 @@ static DEVICE_ATTR_WO(compact); static DEVICE_ATTR_RW(disksize); static DEVICE_ATTR_RO(initstate); static DEVICE_ATTR_WO(reset); -static DEVICE_ATTR_WO(mem_limit); -static DEVICE_ATTR_WO(mem_used_max); +static DEVICE_ATTR_RO(orig_data_size); +static DEVICE_ATTR_RO(mem_used_total); +static DEVICE_ATTR_RW(mem_limit); +static DEVICE_ATTR_RW(mem_used_max); static DEVICE_ATTR_RW(max_comp_streams); static DEVICE_ATTR_RW(comp_algorithm); -#ifdef CONFIG_ZRAM_WRITEBACK -static DEVICE_ATTR_RW(backing_dev); -#endif static struct attribute *zram_disk_attrs[] = { &dev_attr_disksize.attr, &dev_attr_initstate.attr, &dev_attr_reset.attr, + &dev_attr_num_reads.attr, + &dev_attr_num_writes.attr, + &dev_attr_failed_reads.attr, + &dev_attr_failed_writes.attr, &dev_attr_compact.attr, + &dev_attr_invalid_io.attr, + &dev_attr_notify_free.attr, + &dev_attr_zero_pages.attr, + &dev_attr_orig_data_size.attr, + &dev_attr_compr_data_size.attr, + &dev_attr_mem_used_total.attr, &dev_attr_mem_limit.attr, &dev_attr_mem_used_max.attr, &dev_attr_max_comp_streams.attr, &dev_attr_comp_algorithm.attr, -#ifdef CONFIG_ZRAM_WRITEBACK - &dev_attr_backing_dev.attr, -#endif &dev_attr_io_stat.attr, &dev_attr_mm_stat.attr, - &dev_attr_debug_stat.attr, NULL, }; -static const struct attribute_group zram_disk_attr_group = { +static struct attribute_group zram_disk_attr_group = { .attrs = zram_disk_attrs, }; -static const struct attribute_group *zram_disk_attr_groups[] = { - &zram_disk_attr_group, - NULL, -}; - /* * Allocate and initialize new zram device. the function returns * '>= 0' device_id upon success, and negative value otherwise. 
@@ -1698,7 +1251,6 @@ static int zram_add(void) /* zram devices sort of resembles non-rotational disks */ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue); - /* * To ensure that we always get PAGE_SIZE aligned * and n*PAGE_SIZED sized I/O requests. @@ -1709,6 +1261,8 @@ static int zram_add(void) blk_queue_io_min(zram->disk->queue, PAGE_SIZE); blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); zram->disk->queue->limits.discard_granularity = PAGE_SIZE; + zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE; + zram->disk->queue->limits.chunk_sectors = 0; blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); /* * zram_bio_discard() will clear all logical blocks if logical block @@ -1724,17 +1278,24 @@ static int zram_add(void) zram->disk->queue->limits.discard_zeroes_data = 0; queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue); - zram->disk->queue->backing_dev_info->capabilities |= - BDI_CAP_STABLE_WRITES; - disk_to_dev(zram->disk)->groups = zram_disk_attr_groups; add_disk(zram->disk); + ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj, + &zram_disk_attr_group); + if (ret < 0) { + pr_err("Error creating sysfs group for device %d\n", + device_id); + goto out_free_disk; + } strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor)); + zram->meta = NULL; - zram_debugfs_register(zram); pr_info("Added device: %s\n", zram->disk->disk_name); return device_id; +out_free_disk: + del_gendisk(zram->disk); + put_disk(zram->disk); out_free_queue: blk_cleanup_queue(queue); out_free_idr: @@ -1762,7 +1323,15 @@ static int zram_remove(struct zram *zram) zram->claim = true; mutex_unlock(&bdev->bd_mutex); - zram_debugfs_unregister(zram); + /* + * Remove sysfs first, so no one will perform a disksize + * store while we destroy the devices. 
This also helps during + * hot_remove -- zram_reset_device() is the last holder of + * ->init_lock, no later/concurrent disksize_store() or any + * other sysfs handlers are possible. + */ + sysfs_remove_group(&disk_to_dev(zram->disk)->kobj, + &zram_disk_attr_group); /* Make sure all the pending I/O are finished */ fsync_bdev(bdev); @@ -1771,8 +1340,8 @@ static int zram_remove(struct zram *zram) pr_info("Removed device: %s\n", zram->disk->disk_name); - del_gendisk(zram->disk); blk_cleanup_queue(zram->disk->queue); + del_gendisk(zram->disk); put_disk(zram->disk); kfree(zram); return 0; @@ -1852,7 +1421,6 @@ static void destroy_devices(void) { class_unregister(&zram_control_class); idr_for_each(&zram_index_idr, &zram_remove_cb, NULL); - zram_debugfs_destroy(); idr_destroy(&zram_index_idr); unregister_blkdev(zram_major, "zram"); } @@ -1867,7 +1435,6 @@ static int __init zram_init(void) return ret; } - zram_debugfs_create(); zram_major = register_blkdev(0, "zram"); if (zram_major <= 0) { pr_err("Unable to get major number\n"); @@ -1901,6 +1468,8 @@ module_exit(zram_exit); module_param(num_devices, uint, 0); MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices"); +module_param_string(backend, backend_param_buf, BACKEND_PARAM_BUF_SIZE, 0); +MODULE_PARM_DESC(backend, "Compression storage (backend) name"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Nitin Gupta "); diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 4b7adb21dea6..f9a4c8b6bedc 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -11,13 +11,17 @@ * Released under the terms of GNU General Public License Version 2.0 * */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2015 Sony Mobile Communications Inc, + * and licensed under the license of the file. 
+ */ #ifndef _ZRAM_DRV_H_ #define _ZRAM_DRV_H_ -#include -#include -#include +#include +#include #include "zcomp.h" @@ -37,6 +41,7 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3; /*-- End of configurable params */ +#define SECTOR_SHIFT 9 #define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) #define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT) #define ZRAM_LOGICAL_BLOCK_SHIFT 12 @@ -59,11 +64,9 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3; /* Flags for zram pages (table[page_no].value) */ enum zram_pageflags { - /* zram slot is locked */ - ZRAM_LOCK = ZRAM_FLAG_SHIFT, - ZRAM_SAME, /* Page consists the same element */ - ZRAM_WB, /* page is stored on backing_device */ - ZRAM_HUGE, /* Incompressible page */ + /* Page consists entirely of zeros */ + ZRAM_ZERO = ZRAM_FLAG_SHIFT, + ZRAM_ACCESS, /* page is now accessed */ __NR_ZRAM_PAGEFLAGS, }; @@ -72,14 +75,8 @@ enum zram_pageflags { /* Allocated for each disk page */ struct zram_table_entry { - union { - unsigned long handle; - unsigned long element; - }; + unsigned long handle; unsigned long value; -#ifdef CONFIG_ZRAM_MEMORY_TRACKING - ktime_t ac_time; -#endif }; struct zram_stats { @@ -90,16 +87,18 @@ struct zram_stats { atomic64_t failed_writes; /* can happen when memory is too low */ atomic64_t invalid_io; /* non-page-aligned I/O requests */ atomic64_t notify_free; /* no. of swap slot free notifications */ - atomic64_t same_pages; /* no. of same element filled pages */ - atomic64_t huge_pages; /* no. of huge pages */ + atomic64_t zero_pages; /* no. of zero filled pages */ atomic64_t pages_stored; /* no. of pages currently stored */ atomic_long_t max_used_pages; /* no. of maximum pages stored */ - atomic64_t writestall; /* no. 
of write slow paths */ }; -struct zram { +struct zram_meta { struct zram_table_entry *table; - struct zs_pool *mem_pool; + struct zpool *mem_pool; +}; + +struct zram { + struct zram_meta *meta; struct zcomp *comp; struct gendisk *disk; /* Prevent concurrent execution of device init */ @@ -110,26 +109,18 @@ struct zram { unsigned long limit_pages; struct zram_stats stats; + atomic_t refcount; /* refcount for zram_meta */ + /* wait all IO under all of cpu are done */ + wait_queue_head_t io_done; /* * This is the limit on amount of *uncompressed* worth of data * we can store in a disk. */ u64 disksize; /* bytes */ - char compressor[CRYPTO_MAX_ALG_NAME]; + char compressor[10]; /* * zram is claimed so open request will be failed */ bool claim; /* Protected by bdev->bd_mutex */ -#ifdef CONFIG_ZRAM_WRITEBACK - struct file *backing_dev; - struct block_device *bdev; - unsigned int old_block_size; - unsigned long *bitmap; - unsigned long nr_pages; - spinlock_t bitmap_lock; -#endif -#ifdef CONFIG_ZRAM_MEMORY_TRACKING - struct dentry *debugfs_dir; -#endif }; #endif diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c index eac9eeec3a53..616ec2ac1b22 100644 --- a/drivers/bluetooth/bfusb.c +++ b/drivers/bluetooth/bfusb.c @@ -645,9 +645,6 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i data->bulk_out_ep = bulk_out_ep->desc.bEndpointAddress; data->bulk_pkt_size = le16_to_cpu(bulk_out_ep->desc.wMaxPacketSize); - if (!data->bulk_pkt_size) - goto done; - rwlock_init(&data->lock); data->reassembly = NULL; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index c3eaaa35185b..7039a58a6a4e 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2489,9 +2489,11 @@ static const struct qca_device_info qca_devices_table[] = { { 0x00000302, 28, 4, 18 }, /* Rome 3.2 */ }; -static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request, +static int btusb_qca_send_vendor_req(struct hci_dev *hdev, u8 
request, void *data, u16 size) { + struct btusb_data *btdata = hci_get_drvdata(hdev); + struct usb_device *udev = btdata->udev; int pipe, err; u8 *buf; @@ -2506,7 +2508,7 @@ static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request, err = usb_control_msg(udev, pipe, request, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, buf, size, USB_CTRL_SET_TIMEOUT); if (err < 0) { - dev_err(&udev->dev, "Failed to access otp area (%d)", err); + BT_ERR("%s: Failed to access otp area (%d)", hdev->name, err); goto done; } @@ -2553,11 +2555,6 @@ static int btusb_setup_qca_download_fw(struct hci_dev *hdev, sent += size; count -= size; - /* ep2 need time to switch from function acl to function dfu, - * so we add 20ms delay here. - */ - msleep(20); - while (count) { size = min_t(size_t, count, QCA_DFU_PACKET_LEN); @@ -2662,38 +2659,20 @@ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev, return err; } -/* identify the ROM version and check whether patches are needed */ -static bool btusb_qca_need_patch(struct usb_device *udev) -{ - struct qca_version ver; - - if (btusb_qca_send_vendor_req(udev, QCA_GET_TARGET_VERSION, &ver, - sizeof(ver)) < 0) - return false; - /* only low ROM versions need patches */ - return !(le32_to_cpu(ver.rom_version) & ~0xffffU); -} - static int btusb_setup_qca(struct hci_dev *hdev) { - struct btusb_data *btdata = hci_get_drvdata(hdev); - struct usb_device *udev = btdata->udev; const struct qca_device_info *info = NULL; struct qca_version ver; u32 ver_rom; u8 status; int i, err; - err = btusb_qca_send_vendor_req(udev, QCA_GET_TARGET_VERSION, &ver, + err = btusb_qca_send_vendor_req(hdev, QCA_GET_TARGET_VERSION, &ver, sizeof(ver)); if (err < 0) return err; ver_rom = le32_to_cpu(ver.rom_version); - /* Don't care about high ROM versions */ - if (ver_rom & ~0xffffU) - return 0; - for (i = 0; i < ARRAY_SIZE(qca_devices_table); i++) { if (ver_rom == qca_devices_table[i].rom_version) info = &qca_devices_table[i]; @@ -2704,7 +2683,7 @@ static int 
btusb_setup_qca(struct hci_dev *hdev) return -ENODEV; } - err = btusb_qca_send_vendor_req(udev, QCA_CHECK_STATUS, &status, + err = btusb_qca_send_vendor_req(hdev, QCA_CHECK_STATUS, &status, sizeof(status)); if (err < 0) return err; @@ -2850,8 +2829,7 @@ static int btusb_probe(struct usb_interface *intf, /* Old firmware would otherwise let ath3k driver load * patch and sysconfig files */ - if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001 && - !btusb_qca_need_patch(udev)) + if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001) return -ENODEV; } @@ -2998,7 +2976,6 @@ static int btusb_probe(struct usb_interface *intf, } if (id->driver_info & BTUSB_ATH3012) { - data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c index e6284fc1689b..1c543effe062 100644 --- a/drivers/bus/mips_cdmm.c +++ b/drivers/bus/mips_cdmm.c @@ -544,8 +544,10 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus) dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id); ++id; ret = device_register(&dev->dev); - if (ret) + if (ret) { put_device(&dev->dev); + kfree(dev); + } } } diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c index 624f74d03a83..5012e3ad1225 100644 --- a/drivers/bus/omap_l3_noc.c +++ b/drivers/bus/omap_l3_noc.c @@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev) */ l3->debug_irq = platform_get_irq(pdev, 0); ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler, - IRQF_NO_THREAD, "l3-dbg-irq", l3); + 0x0, "l3-dbg-irq", l3); if (ret) { dev_err(l3->dev, "request_irq failed for %d\n", l3->debug_irq); @@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev) l3->app_irq = platform_get_irq(pdev, 1); ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler, - IRQF_NO_THREAD, "l3-app-irq", l3); + 0x0, 
"l3-app-irq", l3); if (ret) dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq); diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 86110a2abf0f..1852d19d0d7b 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -773,13 +773,6 @@ static int probe_gdrom_setupqueue(void) static int probe_gdrom(struct platform_device *devptr) { int err; - - /* - * Ensure our "one" device is initialized properly in case of previous - * usages of it - */ - memset(&gd, 0, sizeof(gd)); - /* Start the device */ if (gdrom_execute_diagnostic() != 1) { pr_warning("ATA Probe for GDROM failed\n"); @@ -857,8 +850,6 @@ static int remove_gdrom(struct platform_device *devptr) if (gdrom_major) unregister_blkdev(gdrom_major, GDROM_DEV_NAME); unregister_cdrom(gd.cd_info); - kfree(gd.cd_info); - kfree(gd.toc); return 0; } @@ -874,7 +865,7 @@ static struct platform_driver gdrom_driver = { static int __init init_gdrom(void) { int rc; - + gd.toc = NULL; rc = platform_driver_register(&gdrom_driver); if (rc) return rc; @@ -890,6 +881,8 @@ static void __exit exit_gdrom(void) { platform_device_unregister(pd); platform_driver_unregister(&gdrom_driver); + kfree(gd.toc); + kfree(gd.cd_info); } module_init(init_gdrom); diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index dd5645128bc1..f2be7f119e8c 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -297,7 +297,6 @@ struct fastrpc_mmap { int uncached; int secure; uintptr_t attr; - bool is_filemap; /*flag to indicate map used in process init*/ }; struct fastrpc_perf { @@ -557,10 +556,9 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va, spin_lock(&me->hlock); hlist_for_each_entry_safe(map, n, &me->maps, hn) { - if (map->refs == 1 && map->raddr == va && + if (map->raddr == va && map->raddr + map->len == va + len && - /*Remove map if not used in process initialization*/ - !map->is_filemap) { + map->refs == 1) { match = map; hlist_del_init(&map->hn); break; @@ -573,10 +571,9 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va, } spin_lock(&fl->hlock); hlist_for_each_entry_safe(map, n, &fl->maps, hn) { - if (map->refs == 1 && map->raddr == va && + if (map->raddr == va && map->raddr + map->len == va + len && - /*Remove map if not used in process initialization*/ - !map->is_filemap) { + map->refs == 1) { match = map; hlist_del_init(&map->hn); break; @@ -714,7 +711,6 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr, map->fl = fl; map->fd = fd; map->attr = attr; - map->is_filemap = false; if (mflags == ADSP_MMAP_HEAP_ADDR || mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) { DEFINE_DMA_ATTRS(rh_attrs); @@ -1904,8 +1900,6 @@ static int fastrpc_init_process(struct fastrpc_file *fl, if (init->filelen) { VERIFY(err, !fastrpc_mmap_create(fl, init->filefd, 0, init->file, init->filelen, mflags, &file)); - if (file) - file->is_filemap = true; if (err) goto bail; } @@ -2334,13 +2328,11 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl, VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map)); if (err) goto bail; - if (map) { - VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr, - map->phys, map->size, map->flags)); - if (err) - goto bail; - 
fastrpc_mmap_free(map); - } + VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr, + map->phys, map->size, map->flags)); + if (err) + goto bail; + fastrpc_mmap_free(map); bail: if (err && map) fastrpc_mmap_add(map); diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig index 07de755ca30c..c528f96ee204 100644 --- a/drivers/char/agp/Kconfig +++ b/drivers/char/agp/Kconfig @@ -124,7 +124,7 @@ config AGP_HP_ZX1 config AGP_PARISC tristate "HP Quicksilver AGP support" - depends on AGP && PARISC && 64BIT && IOMMU_SBA + depends on AGP && PARISC && 64BIT help This option gives you AGP GART support for the HP Quicksilver AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000 diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index 1d5510cb6db4..15f2e7025b78 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c @@ -285,7 +285,7 @@ agp_ioc_init(void __iomem *ioc_regs) return 0; } -static int __init +static int lba_find_capability(int cap) { struct _parisc_agp_info *info = &parisc_agp_info; @@ -370,7 +370,7 @@ fail: return error; } -static int __init +static int find_quicksilver(struct device *dev, void *data) { struct parisc_device **lba = data; @@ -382,7 +382,7 @@ find_quicksilver(struct device *dev, void *data) return 0; } -static int __init +static int parisc_agp_init(void) { extern struct sba_device *sba_list; diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c index 973bc3b1c5b5..cf2e08f65b64 100644 --- a/drivers/char/diag/diag_dci.c +++ b/drivers/char/diag/diag_dci.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1061,11 +1061,6 @@ void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source, return; } - if (token != entry->client_info.token) { - mutex_unlock(&driver->dci_mutex); - return; - } - mutex_lock(&entry->buffers[data_source].buf_mutex); rsp_buf = entry->buffers[data_source].buf_cmd; @@ -3157,7 +3152,6 @@ fail_alloc: kfree(new_entry); new_entry = NULL; } - put_task_struct(current); mutex_unlock(&driver->dci_mutex); return DIAG_DCI_NO_REG; } diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index eb205f9173f4..5b38d7a8202a 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -976,8 +976,6 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) if (ACPI_SUCCESS(status)) { hdp->hd_phys_address = addr.address.minimum; hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length); - if (!hdp->hd_address) - return AE_ERROR; if (hpet_is_known(hdp)) { iounmap(hdp->hd_address); @@ -991,8 +989,6 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) hdp->hd_phys_address = fixmem32->address; hdp->hd_address = ioremap(fixmem32->address, HPET_RANGE_SIZE); - if (!hdp->hd_address) - return AE_ERROR; if (hpet_is_known(hdp)) { iounmap(hdp->hd_address); diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index e409de5a0cb4..4ada103945f0 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -393,18 +393,16 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, data[0] = 0; WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS); - if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { - if ((ipmi_version_major > 1) || - ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) { - /* This is an IPMI 1.5-only feature. 
*/ - data[0] |= WDOG_DONT_STOP_ON_SET; - } else { - /* - * In ipmi 1.0, setting the timer stops the watchdog, we - * need to start it back up again. - */ - hbnow = 1; - } + if ((ipmi_version_major > 1) + || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) { + /* This is an IPMI 1.5-only feature. */ + data[0] |= WDOG_DONT_STOP_ON_SET; + } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { + /* + * In ipmi 1.0, setting the timer stops the watchdog, we + * need to start it back up again. + */ + hbnow = 1; } data[1] = 0; diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h index b167163b1827..fba6ab1160ce 100644 --- a/drivers/char/mwave/3780i.h +++ b/drivers/char/mwave/3780i.h @@ -68,7 +68,7 @@ typedef struct { unsigned char ClockControl:1; /* RW: Clock control: 0=normal, 1=stop 3780i clocks */ unsigned char SoftReset:1; /* RW: Soft reset 0=normal, 1=soft reset active */ unsigned char ConfigMode:1; /* RW: Configuration mode, 0=normal, 1=config mode */ - unsigned short Reserved:13; /* 0: Reserved */ + unsigned char Reserved:5; /* 0: Reserved */ } DSP_ISA_SLAVE_CONTROL; diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index f8d98f7e6fb7..c115217c79ae 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c @@ -544,10 +544,6 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq) io_read_num_rec_bytes(iobase, &num_bytes_read); if (num_bytes_read >= 4) { DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read); - if (num_bytes_read > 4) { - rc = -EIO; - goto exit_setprotocol; - } break; } mdelay(10); diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c index 50031a215a66..e265bace57d7 100644 --- a/drivers/char/ttyprintk.c +++ b/drivers/char/ttyprintk.c @@ -166,23 +166,12 @@ static int tpk_ioctl(struct tty_struct *tty, return 0; } -/* - * TTY operations hangup function. 
- */ -static void tpk_hangup(struct tty_struct *tty) -{ - struct ttyprintk_port *tpkp = tty->driver_data; - - tty_port_hangup(&tpkp->port); -} - static const struct tty_operations ttyprintk_ops = { .open = tpk_open, .close = tpk_close, .write = tpk_write, .write_room = tpk_write_room, .ioctl = tpk_ioctl, - .hangup = tpk_hangup, }; static struct tty_port_operations null_ops = { }; diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index c2f1c921cb2c..226ccb7891d4 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -487,7 +487,7 @@ static struct port_buffer *get_inbuf(struct port *port) buf = virtqueue_get_buf(port->in_vq, &len); if (buf) { - buf->len = min_t(size_t, len, buf->size); + buf->len = len; buf->offset = 0; port->stats.bytes_received += len; } @@ -1752,7 +1752,7 @@ static void control_work_handler(struct work_struct *work) while ((buf = virtqueue_get_buf(vq, &len))) { spin_unlock(&portdev->c_ivq_lock); - buf->len = min_t(size_t, len, buf->size); + buf->len = len; buf->offset = 0; handle_control_message(vq->vdev, portdev, buf); diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index 8a42a9c2a8f2..14af5c916c9c 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -263,7 +263,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev) return ret; err_reg: - of_node_put(s2mps11_clks[0].clk_np); while (--i >= 0) clkdev_drop(s2mps11_clks[i].lookup); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 02fbf3483173..d859b34d2b60 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -3716,28 +3716,32 @@ EXPORT_SYMBOL_GPL(clk_notifier_register); */ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) { - struct clk_notifier *cn; - int ret = -ENOENT; + struct clk_notifier *cn = NULL; + int ret = -EINVAL; if (!clk || !nb) return -EINVAL; clk_prepare_lock(); - list_for_each_entry(cn, &clk_notifier_list, node) { - if (cn->clk == clk) { - ret = 
srcu_notifier_chain_unregister(&cn->notifier_head, nb); + list_for_each_entry(cn, &clk_notifier_list, node) + if (cn->clk == clk) + break; - clk->core->notifier_count--; + if (cn->clk == clk) { + ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); - /* XXX the notifier code should handle this better */ - if (!cn->notifier_head.head) { - srcu_cleanup_notifier_head(&cn->notifier_head); - list_del(&cn->node); - kfree(cn); - } - break; + clk->core->notifier_count--; + + /* XXX the notifier code should handle this better */ + if (!cn->notifier_head.head) { + srcu_cleanup_notifier_head(&cn->notifier_head); + list_del(&cn->node); + kfree(cn); } + + } else { + ret = -ENOENT; } clk_prepare_unlock(); diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index 50b1138aaad7..664edf0708ea 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c @@ -138,7 +138,7 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, if (parent_rate == 0 || rate == 0) return -EINVAL; - old_rate = clk_hw_get_rate(hw); + old_rate = rate; rate_set = meson_clk_get_pll_settings(pll, rate); if (!rate_set) diff --git a/drivers/clk/msm/clock-gcc-8998.c b/drivers/clk/msm/clock-gcc-8998.c index d568d099c528..e7ac30f0117c 100644 --- a/drivers/clk/msm/clock-gcc-8998.c +++ b/drivers/clk/msm/clock-gcc-8998.c @@ -2207,6 +2207,18 @@ static struct branch_clk gcc_mss_cfg_ahb_clk = { }, }; +static struct branch_clk gcc_mss_q6_bimc_axi_clk = { + .cbcr_reg = GCC_MSS_Q6_BIMC_AXI_CBCR, + .has_sibling = 1, + .base = &virt_base, + .c = { + .dbg_name = "gcc_mss_q6_bimc_axi_clk", + .always_on = true, + .ops = &clk_ops_branch, + CLK_INIT(gcc_mss_q6_bimc_axi_clk.c), + }, +}; + static struct branch_clk gcc_mss_mnoc_bimc_axi_clk = { .cbcr_reg = GCC_MSS_MNOC_BIMC_AXI_CBCR, .has_sibling = 1, @@ -2394,6 +2406,7 @@ static struct mux_clk gcc_debug_mux = { { &gcc_dcc_ahb_clk.c, 0x0119 }, { &ipa_clk.c, 0x011b }, { &gcc_mss_cfg_ahb_clk.c, 0x011f }, + { 
&gcc_mss_q6_bimc_axi_clk.c, 0x0124 }, { &gcc_mss_mnoc_bimc_axi_clk.c, 0x0120 }, { &gcc_mss_snoc_axi_clk.c, 0x0123 }, { &gcc_gpu_cfg_ahb_clk.c, 0x013b }, @@ -2633,6 +2646,7 @@ static struct clk_lookup msm_clocks_gcc_8998[] = { CLK_LIST(gcc_prng_ahb_clk), CLK_LIST(gcc_boot_rom_ahb_clk), CLK_LIST(gcc_mss_cfg_ahb_clk), + CLK_LIST(gcc_mss_q6_bimc_axi_clk), CLK_LIST(gcc_mss_mnoc_bimc_axi_clk), CLK_LIST(gcc_mss_snoc_axi_clk), CLK_LIST(gcc_hdmi_clkref_clk), diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c index e1ed0760396e..4aa298c80679 100644 --- a/drivers/clk/msm/clock-osm.c +++ b/drivers/clk/msm/clock-osm.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -115,6 +116,7 @@ enum clk_osm_trace_packet_id { #define WDOG_DOMAIN_PSTATE_STATUS 0x1c00 #define WDOG_PROGRAM_COUNTER 0x1c74 +#define OSM_CYCLE_COUNTER_USE_XO_EDGE_EN BIT(8) #define PLL_MODE 0x0 #define PLL_L_VAL 0x4 #define PLL_USER_CTRL 0xC @@ -374,6 +376,8 @@ struct clk_osm { u32 cycle_counter_reads; u32 cycle_counter_delay; u32 cycle_counter_factor; + u64 total_cycle_counter; + u32 prev_cycle_counter; u32 l_val_base; u32 apcs_itm_present; u32 apcs_cfg_rcgr; @@ -2208,8 +2212,10 @@ static void clk_osm_setup_cycle_counters(struct clk_osm *c) val |= BIT(0); /* Setup OSM clock to XO ratio */ do_div(ratio, c->xo_clk_rate); - val |= BVAL(5, 1, ratio - 1); + val |= BVAL(5, 1, ratio - 1) | OSM_CYCLE_COUNTER_USE_XO_EDGE_EN; clk_osm_write_reg(c, val, OSM_CYCLE_COUNTER_CTRL_REG); + c->total_cycle_counter = 0; + c->prev_cycle_counter = 0; pr_debug("OSM to XO clock ratio: %d\n", ratio); } @@ -2682,6 +2688,38 @@ fail: return NULL; } +static u64 clk_osm_get_cpu_cycle_counter(int cpu) +{ + struct clk_osm *c; + u32 val; + unsigned long flags; + + if (logical_cpu_to_clk(cpu) == &pwrcl_clk.c) + c = &pwrcl_clk; + else if (logical_cpu_to_clk(cpu) == &perfcl_clk.c) + c = &perfcl_clk; + else { + pr_err("no clock device for CPU=%d\n", cpu); + return 0; + } + + spin_lock_irqsave(&c->lock, 
flags); + val = clk_osm_read_reg_no_log(c, OSM_CYCLE_COUNTER_STATUS_REG); + + if (val < c->prev_cycle_counter) { + /* Handle counter overflow */ + c->total_cycle_counter += UINT_MAX - + c->prev_cycle_counter + val; + c->prev_cycle_counter = val; + } else { + c->total_cycle_counter += val - c->prev_cycle_counter; + c->prev_cycle_counter = val; + } + spin_unlock_irqrestore(&c->lock, flags); + + return c->total_cycle_counter; +} + static void populate_opp_table(struct platform_device *pdev) { int cpu; @@ -3193,6 +3231,9 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev) u32 pte_efuse; char pwrclspeedbinstr[] = "qcom,pwrcl-speedbin0-v0"; char perfclspeedbinstr[] = "qcom,perfcl-speedbin0-v0"; + struct cpu_cycle_counter_cb cb = { + .get_cpu_cycle_counter = clk_osm_get_cpu_cycle_counter, + }; if (of_find_compatible_node(NULL, NULL, "qcom,cpu-clock-osm-msm8998-v1")) { @@ -3448,6 +3489,8 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev) of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); + register_cpu_cycle_counter_cb(&cb); + pr_info("OSM driver inited\n"); put_online_cpus(); diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c index 53b1d300ed8f..1d2b9a1a9609 100644 --- a/drivers/clk/mvebu/kirkwood.c +++ b/drivers/clk/mvebu/kirkwood.c @@ -254,7 +254,6 @@ static const char *powersave_parents[] = { static const struct clk_muxing_soc_desc kirkwood_mux_desc[] __initconst = { { "powersave", powersave_parents, ARRAY_SIZE(powersave_parents), 11, 1, 0 }, - { } }; #define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw) diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c index 48e371035a63..1cebf253e8fd 100644 --- a/drivers/clk/socfpga/clk-gate-a10.c +++ b/drivers/clk/socfpga/clk-gate-a10.c @@ -158,7 +158,6 @@ static void __init __socfpga_gate_init(struct device_node *node, if (IS_ERR(socfpga_clk->sys_mgr_base_addr)) { pr_err("%s: failed to find altr,sys-mgr regmap!\n", 
__func__); - kfree(socfpga_clk); return; } } diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c index 14918896811d..aa7a6e6a15b6 100644 --- a/drivers/clk/socfpga/clk-gate.c +++ b/drivers/clk/socfpga/clk-gate.c @@ -107,7 +107,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk, val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; val &= GENMASK(socfpgaclk->width - 1, 0); /* Check for GPIO_DB_CLK by its offset */ - if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET) + if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET) div = val + 1; else div = (1 << val); diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c index 02ff499e3653..66a0d0ed8b55 100644 --- a/drivers/clk/ti/fapll.c +++ b/drivers/clk/ti/fapll.c @@ -497,7 +497,6 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd, { struct clk_init_data *init; struct fapll_synth *synth; - struct clk *clk = ERR_PTR(-ENOMEM); init = kzalloc(sizeof(*init), GFP_KERNEL); if (!init) @@ -520,19 +519,13 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd, synth->hw.init = init; synth->clk_pll = pll_clk; - clk = clk_register(NULL, &synth->hw); - if (IS_ERR(clk)) { - pr_err("failed to register clock\n"); - goto free; - } - - return clk; + return clk_register(NULL, &synth->hw); free: kfree(synth); kfree(init); - return clk; + return ERR_PTR(-ENOMEM); } static void __init ti_fapll_setup(struct device_node *node) diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 78da251a5f5d..bd55b201371e 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -311,24 +311,15 @@ static void arch_timer_evtstrm_enable(int divider) static void arch_timer_configure_evtstream(void) { - int evt_stream_div, lsb; - - /* - * As the event stream can at most be generated at half the frequency - * of the counter, use half the frequency when computing the divider. 
- */ - evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2; - - /* - * Find the closest power of two to the divisor. If the adjacent bit - * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1). - */ - lsb = fls(evt_stream_div) - 1; - if (lsb > 0 && (evt_stream_div & BIT(lsb - 1))) - lsb++; + int evt_stream_div, pos; + /* Find the closest power of two to the divisor */ + evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ; + pos = fls(evt_stream_div); + if (pos > 1 && !(evt_stream_div & (1 << (pos - 2)))) + pos--; /* enable event stream */ - arch_timer_evtstrm_enable(max(0, min(lsb, 15))); + arch_timer_evtstrm_enable(min(pos, 15)); } static void arch_counter_set_user_access(void) diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c index 23f125126fa8..f5ce2961c0d6 100644 --- a/drivers/clocksource/mxs_timer.c +++ b/drivers/clocksource/mxs_timer.c @@ -154,7 +154,10 @@ static void mxs_irq_clear(char *state) /* Clear pending interrupt */ timrot_irq_acknowledge(); - pr_debug("%s: changing mode to %s\n", __func__, state); + +#ifdef DEBUG + pr_info("%s: changing mode to %s\n", __func__, state) +#endif /* DEBUG */ } static int mxs_shutdown(struct clock_event_device *evt) diff --git a/drivers/cpufreq/cpu-boost.c b/drivers/cpufreq/cpu-boost.c index 448b38d18507..9a181ee6ea8d 100644 --- a/drivers/cpufreq/cpu-boost.c +++ b/drivers/cpufreq/cpu-boost.c @@ -38,6 +38,11 @@ static bool input_boost_enabled; static unsigned int input_boost_ms = 40; module_param(input_boost_ms, uint, 0644); +static bool sched_boost_on_input; +module_param(sched_boost_on_input, bool, 0644); + +static bool sched_boost_active; + static struct delayed_work input_boost_rem; static u64 last_input_time; #define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC) @@ -166,7 +171,7 @@ static void update_policy_online(void) static void do_input_boost_rem(struct work_struct *work) { - unsigned int i; + unsigned int i, ret; struct cpu_sync *i_sync_info; /* Reset 
the input_boost_min for all CPUs in the system */ @@ -178,14 +183,25 @@ static void do_input_boost_rem(struct work_struct *work) /* Update policies for all online CPUs */ update_policy_online(); + + if (sched_boost_active) { + ret = sched_set_boost(0); + if (ret) + pr_err("cpu-boost: HMP boost disable failed\n"); + sched_boost_active = false; + } } static void do_input_boost(struct work_struct *work) { - unsigned int i; + unsigned int i, ret; struct cpu_sync *i_sync_info; cancel_delayed_work_sync(&input_boost_rem); + if (sched_boost_active) { + sched_set_boost(0); + sched_boost_active = false; + } /* Set the input_boost_min for all CPUs in the system */ pr_debug("Setting input boost min for all CPUs\n"); @@ -197,6 +213,15 @@ static void do_input_boost(struct work_struct *work) /* Update policies for all online CPUs */ update_policy_online(); + /* Enable scheduler boost to migrate tasks to big cluster */ + if (sched_boost_on_input) { + ret = sched_set_boost(1); + if (ret) + pr_err("cpu-boost: HMP boost enable failed\n"); + else + sched_boost_active = true; + } + queue_delayed_work(cpu_boost_wq, &input_boost_rem, msecs_to_jiffies(input_boost_ms)); } diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c index a181de57b8d1..ceddc4f43189 100644 --- a/drivers/cpufreq/cpufreq_times.c +++ b/drivers/cpufreq/cpufreq_times.c @@ -193,12 +193,10 @@ static void *uid_seq_start(struct seq_file *seq, loff_t *pos) static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - do { - (*pos)++; + (*pos)++; - if (*pos >= HASH_SIZE(uid_hash_table)) - return NULL; - } while (hlist_empty(&uid_hash_table[*pos])); + if (*pos >= HASH_SIZE(uid_hash_table)) + return NULL; return &uid_hash_table[*pos]; } @@ -222,8 +220,7 @@ static int uid_time_in_state_seq_show(struct seq_file *m, void *v) if (freqs->freq_table[i] == CPUFREQ_ENTRY_INVALID) continue; - seq_put_decimal_ull(m, " ", - freqs->freq_table[i]); + seq_printf(m, " %d", freqs->freq_table[i]); } } 
seq_putc(m, '\n'); @@ -232,16 +229,13 @@ static int uid_time_in_state_seq_show(struct seq_file *m, void *v) rcu_read_lock(); hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) { - if (uid_entry->max_state) { - seq_put_decimal_ull(m, "", uid_entry->uid); - seq_putc(m, ':'); - } + if (uid_entry->max_state) + seq_printf(m, "%d:", uid_entry->uid); for (i = 0; i < uid_entry->max_state; ++i) { - u64 time; if (freq_index_invalid(i)) continue; - time = cputime_to_clock_t(uid_entry->time_in_state[i]); - seq_put_decimal_ull(m, " ", time); + seq_printf(m, " %lu", (unsigned long)cputime_to_clock_t( + uid_entry->time_in_state[i])); } if (uid_entry->max_state) seq_putc(m, '\n'); @@ -262,13 +256,13 @@ static int concurrent_time_seq_show(struct seq_file *m, void *v, hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) { atomic64_t *times = get_times(uid_entry->concurrent_times); - seq_put_decimal_ull(m, "", (u64)uid_entry->uid); + seq_put_decimal_ull(m, 0, (u64)uid_entry->uid); seq_putc(m, ':'); for (i = 0; i < num_possible_cpus; ++i) { u64 time = cputime_to_clock_t(atomic64_read(×[i])); - seq_put_decimal_ull(m, " ", time); + seq_put_decimal_ull(m, ' ', time); } seq_putc(m, '\n'); } @@ -286,7 +280,7 @@ static inline atomic64_t *get_active_times(struct concurrent_times *times) static int concurrent_active_time_seq_show(struct seq_file *m, void *v) { if (v == uid_hash_table) { - seq_put_decimal_ull(m, "cpus: ", num_possible_cpus()); + seq_printf(m, "cpus: %d", num_possible_cpus()); seq_putc(m, '\n'); } @@ -312,18 +306,18 @@ static int concurrent_policy_time_seq_show(struct seq_file *m, void *v) continue; if (freqs != last_freqs) { if (last_freqs) { - seq_put_decimal_ull(m, ": ", cnt); + seq_printf(m, ": %d", cnt); seq_putc(m, ' '); cnt = 0; } - seq_put_decimal_ull(m, "policy", i); + seq_printf(m, "policy%d", i); last_freqs = freqs; } cnt++; } if (last_freqs) { - seq_put_decimal_ull(m, ": ", cnt); + seq_printf(m, ": %d", cnt); seq_putc(m, '\n'); } } diff 
--git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c index ad743f2f31e7..1608f7105c9f 100644 --- a/drivers/cpufreq/highbank-cpufreq.c +++ b/drivers/cpufreq/highbank-cpufreq.c @@ -104,13 +104,6 @@ out_put_node: } module_init(hb_cpufreq_driver_init); -static const struct of_device_id __maybe_unused hb_cpufreq_of_match[] = { - { .compatible = "calxeda,highbank" }, - { .compatible = "calxeda,ecx-2000" }, - { }, -}; -MODULE_DEVICE_TABLE(of, hb_cpufreq_of_match); - MODULE_AUTHOR("Mark Langsdorf "); MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 53226f33ea98..15fcf2cac971 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1187,7 +1187,6 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; - policy->iowait_boost_enable = true; /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; diff --git a/drivers/cpufreq/ls1x-cpufreq.c b/drivers/cpufreq/ls1x-cpufreq.c index 367cb1615c30..262581b3318d 100644 --- a/drivers/cpufreq/ls1x-cpufreq.c +++ b/drivers/cpufreq/ls1x-cpufreq.c @@ -217,7 +217,6 @@ static struct platform_driver ls1x_cpufreq_platdrv = { module_platform_driver(ls1x_cpufreq_platdrv); -MODULE_ALIAS("platform:ls1x-cpufreq"); MODULE_AUTHOR("Kelvin Cheung "); MODULE_DESCRIPTION("Loongson 1 CPUFreq driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 59f16807921a..0b5bf135b090 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -887,9 +887,9 @@ static int get_transition_latency(struct powernow_k8_data *data) /* Take a frequency, and issue the fid/vid transition command */ static int transition_frequency_fidvid(struct 
powernow_k8_data *data, - unsigned int index, - struct cpufreq_policy *policy) + unsigned int index) { + struct cpufreq_policy *policy; u32 fid = 0; u32 vid = 0; int res; @@ -921,6 +921,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, freqs.old = find_khz_freq_from_fid(data->currfid); freqs.new = find_khz_freq_from_fid(fid); + policy = cpufreq_cpu_get(smp_processor_id()); + cpufreq_cpu_put(policy); + cpufreq_freq_transition_begin(policy, &freqs); res = transition_fid_vid(data, fid, vid); cpufreq_freq_transition_end(policy, &freqs, res); @@ -975,7 +978,7 @@ static long powernowk8_target_fn(void *arg) powernow_k8_acpi_pst_values(data, newstate); - ret = transition_frequency_fidvid(data, newstate, pol); + ret = transition_frequency_fidvid(data, newstate); if (ret) { pr_err("transition frequency failed\n"); diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 98f762cca901..de5e89b2eaaa 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -119,7 +119,6 @@ static struct platform_driver scpi_cpufreq_platdrv = { }; module_platform_driver(scpi_cpufreq_platdrv); -MODULE_ALIAS("platform:scpi-cpufreq"); MODULE_AUTHOR("Sudeep Holla "); MODULE_DESCRIPTION("ARM SCPI CPUFreq interface driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index fc3756875bb3..1eaef20e5ed5 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -61,6 +62,31 @@ #define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24) static remote_spinlock_t scm_handoff_lock; +enum { + MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0), + MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1), +}; + +enum debug_event { + CPU_ENTER, + CPU_EXIT, + CLUSTER_ENTER, + CLUSTER_EXIT, + PRE_PC_CB, + CPU_HP_STARTING, + CPU_HP_DYING, +}; + +struct lpm_debug { + cycle_t time; + enum debug_event evt; + int cpu; + 
uint32_t arg1; + uint32_t arg2; + uint32_t arg3; + uint32_t arg4; +}; + struct lpm_cluster *lpm_root_node; #define MAXSAMPLES 5 @@ -95,6 +121,9 @@ static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster); static bool suspend_in_progress; static struct hrtimer lpm_hrtimer; static struct hrtimer histtimer; +static struct lpm_debug *lpm_debug; +static phys_addr_t lpm_debug_phys; +static const int num_dbg_elements = 0x100; static int lpm_cpu_callback(struct notifier_block *cpu_nb, unsigned long action, void *hcpu); @@ -282,6 +311,31 @@ int lpm_get_latency(struct latency_level *level, uint32_t *latency) } EXPORT_SYMBOL(lpm_get_latency); +static void update_debug_pc_event(enum debug_event event, uint32_t arg1, + uint32_t arg2, uint32_t arg3, uint32_t arg4) +{ + struct lpm_debug *dbg; + int idx; + static DEFINE_SPINLOCK(debug_lock); + static int pc_event_index; + + if (!lpm_debug) + return; + + spin_lock(&debug_lock); + idx = pc_event_index++; + dbg = &lpm_debug[idx & (num_dbg_elements - 1)]; + + dbg->evt = event; + dbg->time = arch_counter_get_cntvct(); + dbg->cpu = raw_smp_processor_id(); + dbg->arg1 = arg1; + dbg->arg2 = arg2; + dbg->arg3 = arg3; + dbg->arg4 = arg4; + spin_unlock(&debug_lock); +} + static int lpm_cpu_callback(struct notifier_block *cpu_nb, unsigned long action, void *hcpu) { @@ -290,10 +344,16 @@ static int lpm_cpu_callback(struct notifier_block *cpu_nb, switch (action & ~CPU_TASKS_FROZEN) { case CPU_DYING: + update_debug_pc_event(CPU_HP_DYING, cpu, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], false); cluster_prepare(cluster, get_cpu_mask((unsigned int) cpu), NR_LPM_LEVELS, false, 0); break; case CPU_STARTING: + update_debug_pc_event(CPU_HP_STARTING, cpu, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], false); cluster_unprepare(cluster, get_cpu_mask((unsigned int) cpu), NR_LPM_LEVELS, false, 0); break; @@ -1070,6 +1130,9 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx, } if (idx != 
cluster->default_level) { + update_debug_pc_event(CLUSTER_ENTER, idx, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], from_idle); trace_cluster_enter(cluster->cluster_name, idx, cluster->num_children_in_sync.bits[0], cluster->child_cpus.bits[0], from_idle); @@ -1123,6 +1186,8 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx, /* Notify cluster enter event after successfully config completion */ cluster_notify(cluster, level, true); + sched_set_cluster_dstate(&cluster->child_cpus, idx, 0, 0); + cluster->last_level = idx; if (predicted && (idx < (cluster->nlevels - 1))) { @@ -1275,6 +1340,9 @@ static void cluster_unprepare(struct lpm_cluster *cluster, suspend_wake_time = 0; } + update_debug_pc_event(CLUSTER_EXIT, cluster->last_level, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], from_idle); trace_cluster_exit(cluster->cluster_name, cluster->last_level, cluster->num_children_in_sync.bits[0], cluster->child_cpus.bits[0], from_idle); @@ -1289,6 +1357,8 @@ static void cluster_unprepare(struct lpm_cluster *cluster, BUG_ON(ret); } + sched_set_cluster_dstate(&cluster->child_cpus, 0, 0, 0); + cluster_notify(cluster, &cluster->levels[last_level], false); if (from_idle) @@ -1415,9 +1485,13 @@ bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle) state_id |= (power_state | affinity_level | cluster->cpu->levels[idx].psci_id); + update_debug_pc_event(CPU_ENTER, state_id, + 0xdeaffeed, 0xdeaffeed, true); stop_critical_timings(); success = !arm_cpuidle_suspend(state_id); start_critical_timings(); + update_debug_pc_event(CPU_EXIT, state_id, + success, 0xdeaffeed, true); return success; } } @@ -1440,9 +1514,13 @@ bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle) state_id |= (power_state | affinity_level | cluster->cpu->levels[idx].psci_id); + update_debug_pc_event(CPU_ENTER, state_id, + 0xdeaffeed, 0xdeaffeed, true); stop_critical_timings(); success = 
!arm_cpuidle_suspend(state_id); start_critical_timings(); + update_debug_pc_event(CPU_EXIT, state_id, + success, 0xdeaffeed, true); return success; } } @@ -1516,6 +1594,10 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev, if (idx < 0) return -EINVAL; + pwr_params = &cluster->cpu->levels[idx].pwr; + sched_set_cpu_cstate(smp_processor_id(), idx + 1, + pwr_params->energy_overhead, pwr_params->latency_us); + pwr_params = &cluster->cpu->levels[idx].pwr; cpu_prepare(cluster, idx, true); @@ -1536,6 +1618,7 @@ exit: cluster_unprepare(cluster, cpumask, idx, true, end_time); cpu_unprepare(cluster, idx, true); + sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0); end_time = ktime_to_ns(ktime_get()) - start_time; do_div(end_time, 1000); dev->last_residency = end_time; @@ -1761,6 +1844,9 @@ static int lpm_suspend_enter(suspend_state_t state) } cpu_prepare(cluster, idx, false); cluster_prepare(cluster, cpumask, idx, false, 0); + if (idx > 0) + update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed, + 0xdeaffeed, false); /* * Print the clocks which are enabled during system suspend @@ -1773,6 +1859,10 @@ static int lpm_suspend_enter(suspend_state_t state) BUG_ON(!use_psci); psci_enter_sleep(cluster, idx, true); + if (idx > 0) + update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed, + false); + cluster_unprepare(cluster, cpumask, idx, false, 0); cpu_unprepare(cluster, idx, false); return 0; @@ -1788,7 +1878,9 @@ static const struct platform_suspend_ops lpm_suspend_ops = { static int lpm_probe(struct platform_device *pdev) { int ret; + int size; struct kobject *module_kobj = NULL; + struct md_region md_entry; get_online_cpus(); lpm_root_node = lpm_of_parse_cluster(pdev); @@ -1821,6 +1913,9 @@ static int lpm_probe(struct platform_device *pdev) return ret; } + size = num_dbg_elements * sizeof(struct lpm_debug); + lpm_debug = dma_alloc_coherent(&pdev->dev, size, + &lpm_debug_phys, GFP_KERNEL); register_cluster_lpm_stats(lpm_root_node, NULL); ret = 
cluster_cpuidle_register(lpm_root_node); @@ -1846,6 +1941,14 @@ static int lpm_probe(struct platform_device *pdev) goto failed; } + /* Add lpm_debug to Minidump*/ + strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name)); + md_entry.virt_addr = (uintptr_t)lpm_debug; + md_entry.phys_addr = lpm_debug_phys; + md_entry.size = size; + if (msm_minidump_add_region(&md_entry)) + pr_info("Failed to add lpm_debug in Minidump\n"); + return 0; failed: free_cluster_node(lpm_root_node); @@ -1931,6 +2034,8 @@ enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu) * It must be acquired before releasing the cluster lock. */ unlock_and_return: + update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef, + 0xdeadbeef); trace_pre_pc_cb(retflag); remote_spin_lock_rlock_id(&scm_handoff_lock, REMOTE_SPINLOCK_TID_START + cpu); diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 34c4a61a954f..e7e92ed34f0c 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -413,7 +413,6 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device) &kdev->kobj, "state%d", i); if (ret) { kobject_put(&kobj->kobj); - kfree(kobj); goto error_state; } kobject_uevent(&kobj->kobj, KOBJ_ADD); @@ -544,7 +543,6 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) &kdev->kobj, "driver"); if (ret) { kobject_put(&kdrv->kobj); - kfree(kdrv); return ret; } @@ -631,6 +629,7 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) if (!kdev) return -ENOMEM; kdev->dev = dev; + dev->kobj_dev = kdev; init_completion(&kdev->kobj_unregister); @@ -638,11 +637,9 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) "cpuidle"); if (error) { kobject_put(&kdev->kobj); - kfree(kdev); return error; } - dev->kobj_dev = kdev; kobject_uevent(&kdev->kobj, KOBJ_ADD); return 0; diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 8d0613170d57..13657105cfb9 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -334,7 
+334,7 @@ static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys) buf1 = buf->next; phys1 = buf->phys_next; - dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir); + dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir); dma_pool_free(buffer_pool, buf, phys); buf = buf1; phys = phys1; diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c index 53c07aacc145..4cf95b90a2df 100644 --- a/drivers/crypto/msm/qce.c +++ b/drivers/crypto/msm/qce.c @@ -1,6 +1,6 @@ /* Qualcomm Crypto Engine driver. * - * Copyright (c) 2010-2016, 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -768,11 +768,6 @@ static int _ce_setup(struct qce_device *pce_dev, struct qce_req *q_req, switch (q_req->alg) { case CIPHER_ALG_DES: if (q_req->mode != QCE_MODE_ECB) { - if (ivsize > MAX_IV_LENGTH) { - pr_err("%s: error: Invalid length parameter\n", - __func__); - return -EINVAL; - } _byte_stream_to_net_words(enciv32, q_req->iv, ivsize); writel_relaxed(enciv32[0], pce_dev->iobase + CRYPTO_CNTR0_IV0_REG); diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c index ba52a853965b..7740a8c59126 100644 --- a/drivers/crypto/msm/qce50.c +++ b/drivers/crypto/msm/qce50.c @@ -1,6 +1,6 @@ /* Qualcomm Crypto Engine driver. * - * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2018, 2020 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -4683,7 +4683,7 @@ again: pce_dev->intr_cadence = 0; atomic_set(&pce_dev->bunch_cmd_seq, 0); atomic_set(&pce_dev->last_intr_seq, 0); - pce_dev->cadence_flag = !pce_dev->cadence_flag; + pce_dev->cadence_flag = ~pce_dev->cadence_flag; } } diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index b83e8970a2db..1a8dc76e117e 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -167,19 +167,15 @@ static struct dcp *global_sdcp; static int mxs_dcp_start_dma(struct dcp_async_ctx *actx) { - int dma_err; struct dcp *sdcp = global_sdcp; const int chan = actx->chan; uint32_t stat; unsigned long ret; struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; + dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc), DMA_TO_DEVICE); - dma_err = dma_mapping_error(sdcp->dev, desc_phys); - if (dma_err) - return dma_err; - reinit_completion(&sdcp->completion[chan]); /* Clear status register. 
*/ @@ -217,29 +213,18 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx) static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, struct ablkcipher_request *req, int init) { - dma_addr_t key_phys, src_phys, dst_phys; struct dcp *sdcp = global_sdcp; struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan]; struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); int ret; - key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key, - 2 * AES_KEYSIZE_128, DMA_TO_DEVICE); - ret = dma_mapping_error(sdcp->dev, key_phys); - if (ret) - return ret; - - src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf, - DCP_BUF_SZ, DMA_TO_DEVICE); - ret = dma_mapping_error(sdcp->dev, src_phys); - if (ret) - goto err_src; - - dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf, - DCP_BUF_SZ, DMA_FROM_DEVICE); - ret = dma_mapping_error(sdcp->dev, dst_phys); - if (ret) - goto err_dst; + dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key, + 2 * AES_KEYSIZE_128, + DMA_TO_DEVICE); + dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf, + DCP_BUF_SZ, DMA_TO_DEVICE); + dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf, + DCP_BUF_SZ, DMA_FROM_DEVICE); if (actx->fill % AES_BLOCK_SIZE) { dev_err(sdcp->dev, "Invalid block size!\n"); @@ -277,12 +262,10 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, ret = mxs_dcp_start_dma(actx); aes_done_run: - dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE); -err_dst: - dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE); -err_src: dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128, DMA_TO_DEVICE); + dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE); + dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE); return ret; } @@ -297,20 +280,21 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) struct scatterlist *dst = req->dst; struct scatterlist *src = req->src; - int dst_nents = sg_nents(dst); + 
const int nents = sg_nents(req->src); const int out_off = DCP_BUF_SZ; uint8_t *in_buf = sdcp->coh->aes_in_buf; uint8_t *out_buf = sdcp->coh->aes_out_buf; + uint8_t *out_tmp, *src_buf, *dst_buf = NULL; uint32_t dst_off = 0; - uint8_t *src_buf = NULL; uint32_t last_out_len = 0; uint8_t *key = sdcp->coh->aes_key; int ret = 0; - unsigned int i, len, clen, tlen = 0; + int split = 0; + unsigned int i, len, clen, rem = 0, tlen = 0; int init = 0; bool limit_hit = false; @@ -328,7 +312,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128); } - for_each_sg(req->src, src, sg_nents(src), i) { + for_each_sg(req->src, src, nents, i) { src_buf = sg_virt(src); len = sg_dma_len(src); tlen += len; @@ -353,17 +337,34 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) * submit the buffer. */ if (actx->fill == out_off || sg_is_last(src) || - limit_hit) { + limit_hit) { ret = mxs_dcp_run_aes(actx, req, init); if (ret) return ret; init = 0; - sg_pcopy_from_buffer(dst, dst_nents, out_buf, - actx->fill, dst_off); - dst_off += actx->fill; + out_tmp = out_buf; last_out_len = actx->fill; - actx->fill = 0; + while (dst && actx->fill) { + if (!split) { + dst_buf = sg_virt(dst); + dst_off = 0; + } + rem = min(sg_dma_len(dst) - dst_off, + actx->fill); + + memcpy(dst_buf + dst_off, out_tmp, rem); + out_tmp += rem; + dst_off += rem; + actx->fill -= rem; + + if (dst_off == sg_dma_len(dst)) { + dst = sg_next(dst); + split = 0; + } else { + split = 1; + } + } } } while (len); @@ -569,10 +570,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req) dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf, DCP_BUF_SZ, DMA_TO_DEVICE); - ret = dma_mapping_error(sdcp->dev, buf_phys); - if (ret) - return ret; - /* Fill in the DMA descriptor. 
*/ desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE | MXS_DCP_CONTROL0_INTERRUPT | @@ -605,10 +602,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req) if (rctx->fini) { digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf, DCP_SHA_PAY_SZ, DMA_FROM_DEVICE); - ret = dma_mapping_error(sdcp->dev, digest_phys); - if (ret) - goto done_run; - desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM; desc->payload = digest_phys; } diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c index 1b8c87770645..cddc6d8b55d9 100644 --- a/drivers/crypto/nx/nx-842-pseries.c +++ b/drivers/crypto/nx/nx-842-pseries.c @@ -553,15 +553,13 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata) * The status field indicates if the device is enabled when the status * is 'okay'. Otherwise the device driver will be disabled. * - * @devdata: struct nx842_devdata to use for dev_info - * @prop: struct property point containing the maxsyncop for the update + * @prop - struct property point containing the maxsyncop for the update * * Returns: * 0 - Device is available * -ENODEV - Device is not available */ -static int nx842_OF_upd_status(struct nx842_devdata *devdata, - struct property *prop) +static int nx842_OF_upd_status(struct property *prop) { const char *status = (const char *)prop->value; @@ -775,7 +773,7 @@ static int nx842_OF_upd(struct property *new_prop) goto out; /* Perform property updates */ - ret = nx842_OF_upd_status(new_devdata, status); + ret = nx842_OF_upd_status(status); if (ret) goto error_out; @@ -1088,7 +1086,6 @@ static struct vio_device_id nx842_vio_driver_ids[] = { {"ibm,compression-v1", "ibm,compression"}, {"", ""}, }; -MODULE_DEVICE_TABLE(vio, nx842_vio_driver_ids); static struct vio_driver nx842_vio_driver = { .name = KBUILD_MODNAME, diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index be82186a8afb..7e9a44cee425 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1630,7 +1630,7 
@@ static void omap_sham_done_task(unsigned long data) goto finish; } } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { - if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { + if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { omap_sham_update_dma_stop(dd); if (dd->err) { err = dd->err; diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index 06b35edb0d43..d873eeecc363 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -121,7 +121,6 @@ int adf_dev_init(struct adf_accel_dev *accel_dev) struct service_hndl *service; struct list_head *list_itr; struct adf_hw_device_data *hw_data = accel_dev->hw_device; - int ret; if (!hw_data) { dev_err(&GET_DEV(accel_dev), @@ -188,9 +187,9 @@ int adf_dev_init(struct adf_accel_dev *accel_dev) } hw_data->enable_error_correction(accel_dev); - ret = hw_data->enable_vf2pf_comms(accel_dev); + hw_data->enable_vf2pf_comms(accel_dev); - return ret; + return 0; } EXPORT_SYMBOL_GPL(adf_dev_init); diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index 7e45c21a6165..5fdbad809343 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -218,13 +218,6 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY)); - if (val != msg) { - dev_dbg(&GET_DEV(accel_dev), - "Collision - PFVF CSR overwritten by remote function\n"); - ret = -EIO; - goto out; - } - if (val & int_bit) { dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n"); val &= ~int_bit; @@ -391,8 +384,6 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev) msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT; BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255); - 
reinit_completion(&accel_dev->vf.iov_msg_completion); - /* Send request from VF to PF */ ret = adf_iov_putmsg(accel_dev, msg, 0); if (ret) { diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 78f0942a3270..3865ae8d96d9 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -198,7 +198,6 @@ static int adf_init_ring(struct adf_etr_ring_data *ring) dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n"); dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes, ring->base_addr, ring->dma_addr); - ring->base_addr = NULL; return -EFAULT; } diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 5e5003379281..380e761801a7 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -1210,11 +1210,7 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle, pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr); return -EINVAL; } - status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval); - if (status) { - pr_err("QAT: failed to read register"); - return status; - } + qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval); gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum); data16low = 0xffff & data; data16hi = 0xffff & (data >> 0x10); diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 28e642959a9a..923bb1988973 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -360,6 +360,7 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle, return 0; } +#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, struct icp_qat_uof_initmem *init_mem) { diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c index 
ddbb43da1a13..5570f78795c1 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c @@ -315,31 +315,18 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) ret = adf_isr_alloc_msix_entry_table(accel_dev); if (ret) + return ret; + if (adf_enable_msix(accel_dev)) goto err_out; - ret = adf_enable_msix(accel_dev); - if (ret) - goto err_free_msix_table; - - ret = adf_setup_bh(accel_dev); - if (ret) - goto err_disable_msix; + if (adf_setup_bh(accel_dev)) + goto err_out; - ret = adf_request_irqs(accel_dev); - if (ret) - goto err_cleanup_bh; + if (adf_request_irqs(accel_dev)) + goto err_out; return 0; - -err_cleanup_bh: - adf_cleanup_bh(accel_dev); - -err_disable_msix: - adf_disable_msix(&accel_dev->accel_pci_dev); - -err_free_msix_table: - adf_isr_free_msix_entry_table(accel_dev); - err_out: - return ret; + adf_isr_resource_free(accel_dev); + return -EFAULT; } diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c b/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c index 32f9c2b79681..87c5d8adb125 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c @@ -243,25 +243,16 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev) goto err_out; if (adf_setup_pf2vf_bh(accel_dev)) - goto err_disable_msi; + goto err_out; if (adf_setup_bh(accel_dev)) - goto err_cleanup_pf2vf_bh; + goto err_out; if (adf_request_msi_irq(accel_dev)) - goto err_cleanup_bh; + goto err_out; return 0; - -err_cleanup_bh: - adf_cleanup_bh(accel_dev); - -err_cleanup_pf2vf_bh: - adf_cleanup_pf2vf_bh(accel_dev); - -err_disable_msi: - adf_disable_msi(accel_dev); - err_out: + adf_vf_isr_resource_free(accel_dev); return -EFAULT; } diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index da2e4c193953..0c9973ec80eb 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -539,8 +539,8 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def, ret = 
crypto_register_ahash(alg); if (ret) { - dev_err(qce->dev, "%s registration failed\n", base->cra_name); kfree(tmpl); + dev_err(qce->dev, "%s registration failed\n", base->cra_name); return ret; } diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index cfefa18bca28..1c8857e7db89 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -440,7 +440,7 @@ DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE) /* * locate current (offending) descriptor */ -static __be32 current_desc_hdr(struct device *dev, int ch) +static u32 current_desc_hdr(struct device *dev, int ch) { struct talitos_private *priv = dev_get_drvdata(dev); int tail, iter; @@ -471,13 +471,13 @@ static __be32 current_desc_hdr(struct device *dev, int ch) /* * user diagnostics; report root cause of error based on execution unit status */ -static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr) +static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) { struct talitos_private *priv = dev_get_drvdata(dev); int i; if (!desc_hdr) - desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF)); + desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF); switch (desc_hdr & DESC_HDR_SEL0_MASK) { case DESC_HDR_SEL0_AFEU: diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 7021b5b49c03..bca6b701c067 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -1022,7 +1022,6 @@ static int hash_hw_final(struct ahash_request *req) goto out; } } else if (req->nbytes == 0 && ctx->keylen > 0) { - ret = -EPERM; dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n", __func__); goto out; diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c index 4c010654204c..08de512e91c0 100644 --- a/drivers/devfreq/governor_msm_adreno_tz.c +++ b/drivers/devfreq/governor_msm_adreno_tz.c @@ -10,6 +10,11 @@ * GNU General Public 
License for more details. * */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2016 Sony Mobile Communications Inc, + * and licensed under the license of the file. + */ #include #include #include @@ -26,6 +31,7 @@ static DEFINE_SPINLOCK(tz_lock); static DEFINE_SPINLOCK(sample_lock); +static DEFINE_SPINLOCK(sample_load_lock); static DEFINE_SPINLOCK(suspend_lock); /* * FLOOR is 5msec to capture up to 3 re-draws @@ -58,9 +64,26 @@ static DEFINE_SPINLOCK(suspend_lock); #define TAG "msm_adreno_tz: " -static u64 suspend_time; -static u64 suspend_start; +#define USEC_PER_MINUTE (1*60*1000*1000) +#define NMAX (15*60*60+1) + +struct gpu_load_data { + unsigned long total_time; + unsigned long busy_time; + u64 update_time; +}; + +struct gpu_load_queue { + struct gpu_load_data *gpu_load; + int head; + int tail; +}; + +static u64 suspend_time, suspend_time_idd; +static u64 suspend_start, suspend_start_idd; static unsigned long acc_total, acc_relative_busy; +static unsigned long gpu_load_total, gpu_load_rel_busy; +static struct gpu_load_queue *gpu_load_infos; static struct msm_adreno_extended_profile *partner_gpu_profile; static void do_partner_start_event(struct work_struct *work); @@ -88,6 +111,21 @@ u64 suspend_time_ms(void) return time_diff; } +u64 suspend_time_ms_idd(void) +{ + u64 suspend_sampling_time; + u64 time_diff = 0; + + if (suspend_start_idd == 0) + return 0; + + suspend_sampling_time = (u64)ktime_to_ms(ktime_get()); + time_diff = suspend_sampling_time - suspend_start_idd; + /* Update the suspend_start sample again */ + suspend_start_idd = suspend_sampling_time; + return time_diff; +} + static ssize_t gpu_load_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -109,6 +147,135 @@ static ssize_t gpu_load_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%lu\n", sysfs_busy_perc); } +int findItem(int head, int tail, u64 refTime) +{ + int low = head, high = tail, middle; + u64 
update_time; + + while (low < high) { + middle = (low + high) / 2; + update_time = + gpu_load_infos->gpu_load[middle % NMAX].update_time; + + if (update_time <= refTime) + low = middle + 1; + else if (update_time > refTime) + high = middle - 1; + } + + update_time = gpu_load_infos->gpu_load[low % NMAX].update_time; + if (update_time > refTime) + low = low - 1; + + if (low < tail) { + u64 l = gpu_load_infos->gpu_load[low % NMAX].update_time; + u64 r = gpu_load_infos->gpu_load[(low+1) % NMAX].update_time; + + if (r - refTime < refTime - l) + low = low + 1; + } + return low % NMAX; +} + +unsigned long cal_gpu_load(u64 current_time, u64 update_time, + u64 begin_time, int minutes) +{ + unsigned long sysfs_busy_perc; + unsigned long tmp_act_relative_busy, tmp_act_total; + int tail = gpu_load_infos->tail, head = gpu_load_infos->head; + + if (current_time - update_time > minutes * USEC_PER_MINUTE) { + sysfs_busy_perc = 0; + } else { + int tmpIndex = head; + + if (current_time - begin_time <= minutes * USEC_PER_MINUTE) { + tmpIndex = head; + } else { + u64 begin = current_time - minutes * USEC_PER_MINUTE; + int tmpTail = (tail < head) ? 
tail + NMAX : tail; + + tmpIndex = findItem(head, tmpTail-1, begin); + } + + tmp_act_relative_busy = + gpu_load_infos->gpu_load[tail-1].busy_time - + gpu_load_infos->gpu_load[tmpIndex].busy_time; + + tmp_act_total = current_time - + gpu_load_infos->gpu_load[tmpIndex].update_time; + + sysfs_busy_perc = (tmp_act_relative_busy * 100 * 10) / + tmp_act_total; + } + return sysfs_busy_perc; +} + +void convert_int_to_string(unsigned long perc, char *str, unsigned int size) +{ + char low[6] = "0"; + char mid[] = "."; + + snprintf(str, sizeof(low), "%lu", (perc / 10)); + snprintf(low, sizeof(low), "%lu", (perc % 10)); + strlcat(str, mid, size); + strlcat(str, low, size); +} + +static ssize_t gpu_period_load_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long sysfs_busy_perc[3]; + int tail, head; + char load[3][6] = {"0", "0", "0"}; + int i; + unsigned int size; + spin_lock(&sample_load_lock); + tail = gpu_load_infos->tail; + head = gpu_load_infos->head; + + if (tail == head) { + sysfs_busy_perc[0] = 0; + sysfs_busy_perc[1] = 0; + sysfs_busy_perc[2] = 0; + } else { + u64 current_time = (u64)ktime_to_us(ktime_get()); + u64 update_time = gpu_load_infos->gpu_load[tail-1].update_time; + u64 begin_time = gpu_load_infos->gpu_load[head].update_time; + + sysfs_busy_perc[0] = cal_gpu_load(current_time, + update_time, begin_time, 1); + sysfs_busy_perc[1] = cal_gpu_load(current_time, + update_time, begin_time, 5); + sysfs_busy_perc[2] = cal_gpu_load(current_time, + update_time, begin_time, 15); + } + for (i = 0; i < 3; i++) { + size = sizeof(load[i]); + convert_int_to_string(sysfs_busy_perc[i], load[i], size); + } + + spin_unlock(&sample_load_lock); + return snprintf(buf, PAGE_SIZE, "%s %s %s\n", + load[0], + load[1], + load[2] + ); +} + +static ssize_t gpu_load_idd_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long busy, total; + spin_lock(&sample_load_lock); + busy = gpu_load_rel_busy; + total = 
gpu_load_total; + spin_unlock(&sample_load_lock); + return snprintf(buf, PAGE_SIZE, "%lu %lu\n", busy, total); +} + /* * Returns the time in ms for which gpu was in suspend state * since last time the entry is read. @@ -134,7 +301,30 @@ static ssize_t suspend_time_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%llu\n", time_diff); } +static ssize_t suspend_time_idd_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 time_diff = 0; + + spin_lock(&suspend_lock); + time_diff = suspend_time_ms_idd(); + /* + * Adding the previous suspend time also as the gpu + * can go and come out of suspend states in between + * reads also and we should have the total suspend + * since last read. + */ + suspend_time_idd += time_diff; + spin_unlock(&suspend_lock); + + return snprintf(buf, PAGE_SIZE, "%llu\n", suspend_time_idd); +} + static DEVICE_ATTR(gpu_load, 0444, gpu_load_show, NULL); +static DEVICE_ATTR(gpu_period_load, 0444, gpu_period_load_show, NULL); +static DEVICE_ATTR(gpu_load_idd, 0444, gpu_load_idd_show, NULL); +static DEVICE_ATTR(gpu_suspend_idd, 0444, suspend_time_idd_show, NULL); static DEVICE_ATTR(suspend_time, 0444, suspend_time_show, @@ -142,10 +332,30 @@ static DEVICE_ATTR(suspend_time, 0444, static const struct device_attribute *adreno_tz_attr_list[] = { &dev_attr_gpu_load, + &dev_attr_gpu_period_load, + &dev_attr_gpu_load_idd, + &dev_attr_gpu_suspend_idd, &dev_attr_suspend_time, NULL }; +void store_work_load(unsigned long gpu_load_total, unsigned long gpu_load_busy) +{ + /* Queue item to gpu_load_queue */ + int index = gpu_load_infos->tail; + int head = gpu_load_infos->head; + + gpu_load_infos->gpu_load[index].total_time = gpu_load_total; + gpu_load_infos->gpu_load[index].busy_time = gpu_load_busy; + gpu_load_infos->gpu_load[index].update_time = + (u64)ktime_to_us(ktime_get()); + + if ((index + 1) % NMAX == head) + gpu_load_infos->head = (head + 1) % NMAX; + + gpu_load_infos->tail = (index + 1) % NMAX; +} + void 
compute_work_load(struct devfreq_dev_status *stats, struct devfreq_msm_adreno_tz_data *priv, struct devfreq *devfreq) @@ -164,6 +374,15 @@ void compute_work_load(struct devfreq_dev_status *stats, acc_relative_busy += busy; spin_unlock(&sample_lock); + + spin_lock(&sample_load_lock); + gpu_load_total += stats->total_time; + gpu_load_rel_busy += (stats->busy_time * stats->current_frequency) / + devfreq->profile->freq_table[0]; + + store_work_load(gpu_load_total, gpu_load_rel_busy); + + spin_unlock(&sample_load_lock); } /* Trap into the TrustZone, and call funcs there. */ @@ -234,6 +453,14 @@ static int __secure_tz_update_entry3(unsigned int *scm_data, u32 size_scm_data, return ret; } +static void tz_init_gpuloadinfos(void) +{ + gpu_load_infos = kzalloc(sizeof(struct gpu_load_queue), GFP_KERNEL); + gpu_load_infos->head = gpu_load_infos->tail = 0; + gpu_load_infos->gpu_load = + kcalloc(NMAX, sizeof(struct gpu_load_data), GFP_KERNEL); +} + static int tz_init_ca(struct devfreq_msm_adreno_tz_data *priv) { unsigned int tz_ca_data[2]; @@ -273,6 +500,8 @@ static int tz_init(struct devfreq_msm_adreno_tz_data *priv, unsigned int *version, u32 size_version) { int ret; + + tz_init_gpuloadinfos(); /* Make sure all CMD IDs are avaialble */ if (scm_is_call_available(SCM_SVC_DCVS, TZ_INIT_ID)) { ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels, @@ -560,6 +789,7 @@ static int tz_handler(struct devfreq *devfreq, unsigned int event, void *data) spin_lock(&suspend_lock); /* Collect the start sample for suspend time */ suspend_start = (u64)ktime_to_ms(ktime_get()); + suspend_start_idd = suspend_start; spin_unlock(&suspend_lock); } break; @@ -567,8 +797,10 @@ static int tz_handler(struct devfreq *devfreq, unsigned int event, void *data) case DEVFREQ_GOV_RESUME: spin_lock(&suspend_lock); suspend_time += suspend_time_ms(); + suspend_time_idd += suspend_time_ms_idd(); /* Reset the suspend_start when gpu resumes */ suspend_start = 0; + suspend_start_idd = 0; spin_unlock(&suspend_lock); 
case DEVFREQ_GOV_INTERVAL: diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 41ce77dff9c6..35f8d70fd871 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -239,7 +239,7 @@ config INTEL_IDMA64 config INTEL_IOATDMA tristate "Intel I/OAT DMA support" - depends on PCI && X86_64 && !UML + depends on PCI && X86_64 select DMA_ENGINE select DMA_ENGINE_RAID select DCA diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index eef1b93828c2..16d0daa058a5 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -72,14 +71,8 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, si = (const struct acpi_csrt_shared_info *)&grp[1]; - /* Match device by MMIO */ - if (si->mmio_base_low != lower_32_bits(mem) || - si->mmio_base_high != upper_32_bits(mem)) - return 0; - - /* Match device by Linux vIRQ */ - ret = acpi_register_gsi(NULL, si->gsi_interrupt, si->interrupt_mode, si->interrupt_polarity); - if (ret != irq) + /* Match device by MMIO and IRQ */ + if (si->mmio_base_low != mem || si->gsi_interrupt != irq) return 0; dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n", diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index d0e85c65d146..8aa3ccf42e55 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -100,7 +100,6 @@ #define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */ #define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */ #define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */ -#define AT_XDMAC_CNDC_NDVIEW_MASK GENMASK(28, 27) #define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */ #define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */ #define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */ @@ -157,7 +156,7 @@ #define AT_XDMAC_CC_WRIP 
(0x1 << 23) /* Write in Progress (read only) */ #define AT_XDMAC_CC_WRIP_DONE (0x0 << 23) #define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23) -#define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */ +#define AT_XDMAC_CC_PERID(i) (0x7f & (i) << 24) /* Channel Peripheral Identifier */ #define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */ #define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */ #define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */ @@ -233,15 +232,15 @@ struct at_xdmac { /* Linked List Descriptor */ struct at_xdmac_lld { - u32 mbr_nda; /* Next Descriptor Member */ - u32 mbr_ubc; /* Microblock Control Member */ - u32 mbr_sa; /* Source Address Member */ - u32 mbr_da; /* Destination Address Member */ - u32 mbr_cfg; /* Configuration Register */ - u32 mbr_bc; /* Block Control Register */ - u32 mbr_ds; /* Data Stride Register */ - u32 mbr_sus; /* Source Microblock Stride Register */ - u32 mbr_dus; /* Destination Microblock Stride Register */ + dma_addr_t mbr_nda; /* Next Descriptor Member */ + u32 mbr_ubc; /* Microblock Control Member */ + dma_addr_t mbr_sa; /* Source Address Member */ + dma_addr_t mbr_da; /* Destination Address Member */ + u32 mbr_cfg; /* Configuration Register */ + u32 mbr_bc; /* Block Control Register */ + u32 mbr_ds; /* Data Stride Register */ + u32 mbr_sus; /* Source Microblock Stride Register */ + u32 mbr_dus; /* Destination Microblock Stride Register */ }; /* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */ @@ -346,6 +345,9 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); + if (at_xdmac_chan_is_enabled(atchan)) + return; + /* Set transfer as active to not try to start it again. 
*/ first->active_xfer = true; @@ -361,8 +363,7 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, */ if (at_xdmac_chan_is_cyclic(atchan)) reg = AT_XDMAC_CNDC_NDVIEW_NDV1; - else if ((first->lld.mbr_ubc & - AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3) + else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) reg = AT_XDMAC_CNDC_NDVIEW_NDV3; else reg = AT_XDMAC_CNDC_NDVIEW_NDV2; @@ -427,12 +428,13 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&atchan->lock, irqflags); cookie = dma_cookie_assign(tx); - list_add_tail(&desc->xfer_node, &atchan->xfers_list); - spin_unlock_irqrestore(&atchan->lock, irqflags); - dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", __func__, atchan, desc); + list_add_tail(&desc->xfer_node, &atchan->xfers_list); + if (list_is_singular(&atchan->xfers_list)) + at_xdmac_start_xfer(atchan, desc); + spin_unlock_irqrestore(&atchan->lock, irqflags); return cookie; } diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig index 6ea3e95c287b..e00c9b022964 100644 --- a/drivers/dma/dw/Kconfig +++ b/drivers/dma/dw/Kconfig @@ -11,7 +11,6 @@ config DW_DMAC_BIG_ENDIAN_IO config DW_DMAC tristate "Synopsys DesignWare AHB DMA platform driver" - depends on HAS_IOMEM select DW_DMAC_CORE select DW_DMAC_BIG_ENDIAN_IO if AVR32 default y if CPU_AT32AP7000 @@ -22,7 +21,6 @@ config DW_DMAC config DW_DMAC_PCI tristate "Synopsys DesignWare AHB DMA PCI driver" depends on PCI - depends on HAS_IOMEM select DW_DMAC_CORE help Support the Synopsys DesignWare AHB DMA controller on the diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 7c4b4c71d3a0..2209f75fdf05 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1335,7 +1335,6 @@ static int fsldma_of_probe(struct platform_device *op) { struct fsldma_device *fdev; struct device_node *child; - unsigned int i; int err; fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); @@ -1417,10 +1416,6 @@ static int 
fsldma_of_probe(struct platform_device *op) return 0; out_free_fdev: - for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { - if (fdev->chan[i]) - fsl_dma_chan_remove(fdev->chan[i]); - } irq_dispose_mapping(fdev->irq); kfree(fdev); out_return: @@ -1441,7 +1436,6 @@ static int fsldma_of_remove(struct platform_device *op) if (fdev->chan[i]) fsl_dma_chan_remove(fdev->chan[i]); } - irq_dispose_mapping(fdev->irq); iounmap(fdev->regs); kfree(fdev); diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 548600ce6cc8..e39457f13d4d 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -723,6 +723,12 @@ static int mmp_pdma_config(struct dma_chan *dchan, chan->dir = cfg->direction; chan->dev_addr = addr; + /* FIXME: drivers should be ported over to use the filter + * function. Once that's done, the following two lines can + * be removed. + */ + if (cfg->slave_id) + chan->drcmr = cfg->slave_id; return 0; } diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 4a5dbf30605a..86c591481dfe 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -68,12 +68,8 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec, return NULL; ofdma_target = of_dma_find_controller(&dma_spec_target); - if (!ofdma_target) { - ofdma->dma_router->route_free(ofdma->dma_router->dev, - route_data); - chan = ERR_PTR(-EPROBE_DEFER); - goto err; - } + if (!ofdma_target) + return NULL; chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target); if (IS_ERR_OR_NULL(chan)) { @@ -84,7 +80,6 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec, chan->route_data = route_data; } -err: /* * Need to put the node back since the ofdma->of_dma_route_allocate * has taken it for generating the new, translated dma_spec diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6ea993478ddd..7f66ae1945b2 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2531,15 +2531,13 @@ static struct dma_async_tx_descriptor 
*pl330_prep_dma_cyclic( for (i = 0; i < len / period_len; i++) { desc = pl330_get_desc(pch); if (!desc) { - unsigned long iflags; - dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", __func__, __LINE__); if (!first) return NULL; - spin_lock_irqsave(&pl330->pool_lock, iflags); + spin_lock_irqsave(&pl330->pool_lock, flags); while (!list_empty(&first->node)) { desc = list_entry(first->node.next, @@ -2549,7 +2547,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( list_move_tail(&first->node, &pl330->desc_pool); - spin_unlock_irqrestore(&pl330->pool_lock, iflags); + spin_unlock_irqrestore(&pl330->pool_lock, flags); return NULL; } diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index ff2e28137a7b..4251e9ac0373 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -959,6 +959,13 @@ static void pxad_get_config(struct pxad_chan *chan, *dcmd |= PXA_DCMD_BURST16; else if (maxburst == 32) *dcmd |= PXA_DCMD_BURST32; + + /* FIXME: drivers should be ported over to use the filter + * function. Once that's done, the following two lines can + * be removed. 
+ */ + if (chan->cfg.slave_id) + chan->drcmr = chan->cfg.slave_id; } static struct dma_async_tx_descriptor * diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 687286f20512..10fcabad80f3 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -118,10 +118,8 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) ret = pm_runtime_get(schan->dev); spin_unlock_irq(&schan->chan_lock); - if (ret < 0) { + if (ret < 0) dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); - pm_runtime_put(schan->dev); - } pm_runtime_barrier(schan->dev); diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index 416057d9f0b6..cc8fc601ed47 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -863,8 +863,8 @@ static int usb_dmac_probe(struct platform_device *pdev) error: of_dma_controller_free(pdev->dev.of_node); -error_pm: pm_runtime_put(&pdev->dev); +error_pm: pm_runtime_disable(&pdev->dev); return ret; } diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index e6d3ed1de374..0fede051f4e1 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3715,9 +3715,6 @@ failure: kfree(base->lcla_pool.base_unaligned); - if (base->lcpa_base) - iounmap(base->lcpa_base); - if (base->phy_lcpa) release_mem_region(base->phy_lcpa, base->lcpa_size); diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index e1a81e4f7403..929640981d8a 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -352,7 +352,7 @@ static int altr_sdram_probe(struct platform_device *pdev) if (irq < 0) { edac_printk(KERN_ERR, EDAC_MC, "No irq %d in DT\n", irq); - return irq; + return -ENODEV; } /* Arria10 has a 2nd IRQ */ diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index d6a94cf2501e..dc68394da682 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -199,7 +199,7 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems) else return 
(char *)ptr; - r = (unsigned long)ptr % align; + r = (unsigned long)p % align; if (r == 0) return (char *)ptr; diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index a2ad988c9bdc..a4e1f6939c39 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -848,7 +848,7 @@ static u64 haswell_get_tohm(struct sbridge_pvt *pvt) pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, ®); rc = ((reg << 6) | rc) << 26; - return rc | 0x3ffffff; + return rc | 0x1ffffff; } static u64 haswell_rir_limit(u32 reg) diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index 091f03852dca..fc153aea2f6c 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -371,7 +371,7 @@ static int synps_edac_init_csrows(struct mem_ctl_info *mci) for (j = 0; j < csi->nr_channels; j++) { dimm = csi->channels[j]->dimm; - dimm->edac_mode = EDAC_SECDED; + dimm->edac_mode = EDAC_FLAG_SECDED; dimm->mtype = synps_edac_get_mtype(priv->baseaddr); dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels; dimm->grain = SYNPS_EDAC_ERR_GRAIN; diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c index d9f0971d8464..41f876414a18 100644 --- a/drivers/edac/xgene_edac.c +++ b/drivers/edac/xgene_edac.c @@ -1868,7 +1868,7 @@ static int xgene_edac_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, i); if (irq < 0) { dev_err(&pdev->dev, "No IRQ resource\n"); - rc = irq; + rc = -EINVAL; goto out_err; } rc = devm_request_irq(&pdev->dev, irq, diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c index 02b3feb76ca9..44c499e1beee 100644 --- a/drivers/extcon/extcon-max77693.c +++ b/drivers/extcon/extcon-max77693.c @@ -1262,4 +1262,4 @@ module_platform_driver(max77693_muic_driver); MODULE_DESCRIPTION("Maxim MAX77693 Extcon driver"); MODULE_AUTHOR("Chanwoo Choi "); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:max77693-muic"); +MODULE_ALIAS("platform:extcon-max77693"); diff --git 
a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c index a37c7257ccc7..3d6b42f61f56 100644 --- a/drivers/extcon/extcon-max8997.c +++ b/drivers/extcon/extcon-max8997.c @@ -780,4 +780,3 @@ module_platform_driver(max8997_muic_driver); MODULE_DESCRIPTION("Maxim MAX8997 Extcon driver"); MODULE_AUTHOR("Donggeun Kim "); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:max8997-muic"); diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c index 9b8c79bc3acd..f63f9961ac12 100644 --- a/drivers/extcon/extcon-sm5502.c +++ b/drivers/extcon/extcon-sm5502.c @@ -92,6 +92,7 @@ static struct reg_data sm5502_reg_data[] = { | SM5502_REG_INTM2_MHL_MASK, .invert = true, }, + { } }; /* List of detectable cables */ diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 0e2c9cb95255..15c362a2be1c 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -945,7 +945,6 @@ int extcon_dev_register(struct extcon_dev *edev) sizeof(*edev->nh) * edev->max_supported, GFP_KERNEL); if (!edev->nh) { ret = -ENOMEM; - device_unregister(&edev->dev); goto err_dev; } diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index 40ed4d8c61f5..76b2d390f6ec 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -358,7 +358,6 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct client *client = file->private_data; spinlock_t *client_list_lock = &client->lynx->client_list_lock; struct nosy_stats stats; - int ret; switch (cmd) { case NOSY_IOC_GET_STATS: @@ -373,15 +372,11 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return 0; case NOSY_IOC_START: - ret = -EBUSY; spin_lock_irq(client_list_lock); - if (list_empty(&client->link)) { - list_add_tail(&client->link, &client->lynx->client_list); - ret = 0; - } + list_add_tail(&client->link, &client->lynx->client_list); spin_unlock_irq(client_list_lock); - return ret; + return 0; case NOSY_IOC_STOP: spin_lock_irq(client_list_lock); diff 
--git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 91dbc6ae56cf..c0e54396f250 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -35,6 +35,8 @@ #define INDENT_SP " " +static char rcd_decode_str[CPER_REC_LEN]; + /* * CPER record ID need to be unique even after reboot, because record * ID is used as index for ERST storage, while CPER records from @@ -255,7 +257,8 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg) if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE)) return 0; - len = CPER_REC_LEN; + n = 0; + len = CPER_REC_LEN - 1; dmi_memdev_name(mem->mem_dev_handle, &bank, &device); if (bank && device) n = snprintf(msg, len, "DIMM location: %s %s ", bank, device); @@ -264,6 +267,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg) "DIMM location: not present. DMI handle: 0x%.4x ", mem->mem_dev_handle); + msg[n] = '\0'; return n; } @@ -291,7 +295,6 @@ const char *cper_mem_err_unpack(struct trace_seq *p, struct cper_mem_err_compact *cmem) { const char *ret = trace_seq_buffer_ptr(p); - char rcd_decode_str[CPER_REC_LEN]; if (cper_mem_err_location(cmem, rcd_decode_str)) trace_seq_printf(p, "%s", rcd_decode_str); @@ -306,7 +309,6 @@ static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem, int len) { struct cper_mem_err_compact cmem; - char rcd_decode_str[CPER_REC_LEN]; /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */ if (len == sizeof(struct cper_sec_mem_err_old) && diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index 0affb47d028a..1d4d9bc8b69d 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -370,7 +370,7 @@ static int pcf857x_probe(struct i2c_client *client, * reset state. Otherwise it flags pins to be driven low. 
*/ gpio->out = ~n_latch; - gpio->status = gpio->read(gpio->client); + gpio->status = gpio->out; status = gpiochip_add(&gpio->chip); if (status < 0) diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index ccfdf5a45998..8abeacac5885 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -764,11 +764,8 @@ err_disable_clk: static int zynq_gpio_remove(struct platform_device *pdev) { struct zynq_gpio *gpio = platform_get_drvdata(pdev); - int ret; - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) - dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n"); + pm_runtime_get_sync(&pdev->dev); gpiochip_remove(&gpio->chip); clk_disable_unprepare(gpio->clk); device_set_wakeup_capable(&pdev->dev, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 80e3b41294e5..1b3fda2331be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -404,9 +404,6 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder) native_mode->vdisplay != 0 && native_mode->clock != 0) { mode = drm_mode_duplicate(dev, native_mode); - if (!mode) - return NULL; - mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; drm_mode_set_name(mode); @@ -421,9 +418,6 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder) * simpler. 
*/ mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false); - if (!mode) - return NULL; - mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name); } @@ -841,7 +835,6 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector) amdgpu_connector_get_edid(connector); ret = amdgpu_connector_ddc_get_modes(connector); - amdgpu_get_native_mode(connector); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 3490d300bed2..31a676376d73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -340,7 +340,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus, void amdgpu_i2c_router_select_ddc_port(struct amdgpu_connector *amdgpu_connector) { - u8 val = 0; + u8 val; if (!amdgpu_connector->router.ddc_valid) return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 014b87143837..062c23125b2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -566,7 +566,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) DMA_BIDIRECTIONAL : DMA_TO_DEVICE; /* double check that we don't free the table twice */ - if (!ttm->sg || !ttm->sg->sgl) + if (!ttm->sg->sgl) return; /* free the sg table and pages again */ @@ -737,7 +737,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) if (gtt && gtt->userptr) { kfree(ttm->sg); - ttm->sg = NULL; ttm->page_flags &= ~TTM_PAGE_FLAG_SG; return; } diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index a21365e172fe..edef90810c84 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -4301,8 +4301,6 @@ static void drm_add_display_info(struct edid *edid, if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) return; - info->color_formats |= 
DRM_COLOR_FORMAT_RGB444; - /* Get data from CEA blocks if present */ edid_ext = drm_find_cea_extension(edid); if (edid_ext) { @@ -4355,6 +4353,7 @@ static void drm_add_display_info(struct edid *edid, DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n", connector->name, info->bpc); + info->color_formats |= DRM_COLOR_FORMAT_RGB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index e465d7127258..25c68e4dc7a5 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -2125,7 +2125,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev DRM_INFO("failed to retrieve link info, disabling eDP\n"); cdv_intel_dp_encoder_destroy(encoder); cdv_intel_dp_destroy(connector); - goto err_connector; + goto err_priv; } else { DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n", intel_dp->dpcd[0], intel_dp->dpcd[1], diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c index fc9a34ed58bd..e28107061148 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c @@ -279,8 +279,11 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev) hdmi_dev = pci_get_drvdata(dev); i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL); - if (!i2c_dev) - return -ENOMEM; + if (i2c_dev == NULL) { + DRM_ERROR("Can't allocate interface\n"); + ret = -ENOMEM; + goto exit; + } i2c_dev->adap = &oaktrail_hdmi_i2c_adapter; i2c_dev->status = I2C_STAT_INIT; @@ -297,23 +300,16 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev) oaktrail_hdmi_i2c_adapter.name, hdmi_dev); if (ret) { DRM_ERROR("Failed to request IRQ for I2C controller\n"); - goto free_dev; + goto err; } /* Adapter registration */ ret = 
i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter); - if (ret) { - DRM_ERROR("Failed to add I2C adapter\n"); - goto free_irq; - } - - return 0; + return ret; -free_irq: - free_irq(dev->irq, hdmi_dev); -free_dev: +err: kfree(i2c_dev); - +exit: return ret; } diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 15a909efe0c7..db98ab5cde3d 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -325,8 +325,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) if (ret) goto out_err; - ret = -ENOMEM; - dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0); if (!dev_priv->mmu) goto out_err; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 788c9258be36..5799356f6b6b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1719,8 +1719,6 @@ struct drm_i915_private { struct intel_uncore uncore; - struct mutex tlb_invalidate_lock; - struct i915_virtual_gpu vgpu; struct intel_guc guc; @@ -2068,9 +2066,6 @@ struct drm_i915_gem_object { */ unsigned int active:I915_NUM_RINGS; - unsigned long flags; -#define I915_BO_WAS_BOUND_BIT 0 - /** * This is set if the object has been written to since last bound * to the GTT diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 93416d185524..659b90657f36 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2212,85 +2212,6 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) kfree(obj->pages); } -#define _wait_for_us(COND, US, W) ({ \ - unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \ - int ret__; \ - for (;;) { \ - bool expired__ = time_after(jiffies, timeout__); \ - if (COND) { \ - ret__ = 0; \ - break; \ - } \ - if (expired__) { \ - ret__ = -ETIMEDOUT; \ - break; \ - } \ - usleep_range((W), (W)*2); \ - } \ - ret__; \ -}) - -static int -__intel_wait_for_register_fw(struct drm_i915_private *dev_priv, 
- u32 reg, - const u32 mask, - const u32 value, - const unsigned int timeout_us, - const unsigned int timeout_ms) -{ -#define done ((I915_READ_FW(reg) & mask) == value) - int ret = _wait_for_us(done, timeout_us, 2); - if (ret) - ret = wait_for(done, timeout_ms); - return ret; -#undef done -} - -static void invalidate_tlbs(struct drm_i915_private *dev_priv) -{ - static const u32 gen8_regs[] = { - [RCS] = GEN8_RTCR, - [VCS] = GEN8_M1TCR, - [VCS2] = GEN8_M2TCR, - [VECS] = GEN8_VTCR, - [BCS] = GEN8_BTCR, - }; - enum intel_ring_id id; - - if (INTEL_INFO(dev_priv)->gen < 8) - return; - - mutex_lock(&dev_priv->tlb_invalidate_lock); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - - for (id = 0; id < I915_NUM_RINGS; id++) { - struct intel_engine_cs *engine = &dev_priv->ring[id]; - /* - * HW architecture suggest typical invalidation time at 40us, - * with pessimistic cases up to 100us and a recommendation to - * cap at 1ms. We go a bit higher just in case. - */ - const unsigned int timeout_us = 100; - const unsigned int timeout_ms = 4; - - if (!intel_ring_initialized(engine)) - continue; - - if (WARN_ON_ONCE(id >= ARRAY_SIZE(gen8_regs) || !gen8_regs[id])) - continue; - - I915_WRITE_FW(gen8_regs[id], 1); - if (__intel_wait_for_register_fw(dev_priv, - gen8_regs[id], 1, 0, - timeout_us, timeout_ms)) - DRM_ERROR_RATELIMITED("%s TLB invalidation did not complete in %ums!\n", - engine->name, timeout_ms); - } - - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - mutex_unlock(&dev_priv->tlb_invalidate_lock); -} - int i915_gem_object_put_pages(struct drm_i915_gem_object *obj) { @@ -2309,14 +2230,6 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) * lists early. 
*/ list_del(&obj->global_list); - if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - - intel_runtime_pm_get(i915); - invalidate_tlbs(i915); - intel_runtime_pm_put(i915); - } - ops->put_pages(obj); obj->pages = NULL; @@ -5137,8 +5050,6 @@ i915_gem_load(struct drm_device *dev) i915_gem_shrinker_init(dev_priv); mutex_init(&dev_priv->fb_tracking.lock); - - mutex_init(&dev_priv->tlb_invalidate_lock); } void i915_gem_release(struct drm_device *dev, struct drm_file *file) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index b2bb0b268ea9..65a53ee398b8 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3538,9 +3538,6 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, vma->bound |= bind_flags; - if (vma->obj) - set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags); - return 0; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 33a9b80da5dc..603d8cdfc5f1 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1592,12 +1592,6 @@ enum skl_disp_power_wells { #define GEN7_TLB_RD_ADDR 0x4700 -#define GEN8_RTCR 0x4260 -#define GEN8_M1TCR 0x4264 -#define GEN8_M2TCR 0x4268 -#define GEN8_BTCR 0x426c -#define GEN8_VTCR 0x4270 - #if 0 #define PRB0_TAIL 0x02030 #define PRB0_HEAD 0x02034 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 3e107e103d35..81bd84f9156b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2245,9 +2245,9 @@ static void snb_wm_latency_quirk(struct drm_device *dev) * The BIOS provided WM memory latency values are often * inadequate for high resolution displays. Adjust them. 
*/ - changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12); - changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12); - changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); + changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | + ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | + ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); if (!changed) return; diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 74585ba16501..b9dc2ef64ed8 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -217,11 +217,6 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder) int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); - if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) { - dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux); - return; - } - drm_panel_prepare(imx_ldb_ch->panel); if (dual) { @@ -272,11 +267,6 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, unsigned long di_clk = mode->clock * 1000; int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder); - if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) { - dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux); - return; - } - if (mode->clock > 170000) { dev_warn(ldb->dev, "%s: mode exceeds 170 MHz pixel clock\n", __func__); diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index e3f07f58bbed..f9bed1058f38 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -36,10 +36,8 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi) } phy_pdev = of_find_device_by_node(phy_node); - if (phy_pdev) { + if (phy_pdev) msm_dsi->phy = platform_get_drvdata(phy_pdev); - msm_dsi->phy_dev = &phy_pdev->dev; - } of_node_put(phy_node); @@ -48,6 +46,8 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi) return -EPROBE_DEFER; } + 
msm_dsi->phy_dev = get_device(&phy_pdev->dev); + return 0; } diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c index 1be6dc196e97..81200e9be382 100644 --- a/drivers/gpu/drm/msm/edp/edp_ctrl.c +++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c @@ -1095,7 +1095,7 @@ void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on) int msm_edp_ctrl_init(struct msm_edp *edp) { struct edp_ctrl *ctrl = NULL; - struct device *dev; + struct device *dev = &edp->pdev->dev; int ret; if (!edp) { @@ -1103,7 +1103,6 @@ int msm_edp_ctrl_init(struct msm_edp *edp) return -EINVAL; } - dev = &edp->pdev->dev; ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); if (!ctrl) return -ENOMEM; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c index d8c7b8a6a418..8e6c9b598a57 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c @@ -128,17 +128,9 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder, | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN; cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line); - /* - * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on - * the vsync_clk equating to roughly half the desired panel refresh rate. - * This is only necessary as stability fallback if interrupts from the - * panel arrive too late or not at all, but is currently used by default - * because these panel interrupts are not wired up yet. 
- */ mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg); mdp5_write(mdp5_kms, - REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal)); - + REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0); mdp5_write(mdp5_kms, REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay); mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 81d3481b2aa4..80dc06b4a884 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -387,7 +387,7 @@ static int msm_init_vram(struct drm_device *dev) ret = of_address_to_resource(node, 0, &r); if (ret) return ret; - size = r.end - r.start + 1; + size = r.end - r.start; DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start); } else #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 58c310930bf2..78f520d05de9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -458,7 +458,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; int i; - if (!ttm_dma || !ttm_dma->dma_address) + if (!ttm_dma) return; /* Don't waste time looping if the object is coherent */ @@ -478,7 +478,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; int i; - if (!ttm_dma || !ttm_dma->dma_address) + if (!ttm_dma) return; /* Don't waste time looping if the object is coherent */ diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c index 381c59279d7f..cf8bc068e9b7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c @@ -56,7 +56,7 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size) args->v0.count = 0; args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE; 
args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE; - args->v0.pwrsrc = -ENODEV; + args->v0.pwrsrc = -ENOSYS; args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c index 4b571cc6bc70..7deb81b6dbac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c @@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd) nvkm_debug(subdev, "%08x: type %02x, %d bytes\n", image.base, image.type, image.size); - if (!shadow_fetch(bios, mthd, image.base + image.size)) { + if (!shadow_fetch(bios, mthd, image.size)) { nvkm_debug(subdev, "%08x: fetch failed\n", image.base); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c index a3cede8df4fd..7cac8fe372b6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm204.c @@ -33,7 +33,7 @@ static void gm204_i2c_aux_fini(struct gm204_i2c_aux *aux) { struct nvkm_device *device = aux->base.pad->i2c->subdev.device; - nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000); + nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000); } static int @@ -54,10 +54,10 @@ gm204_i2c_aux_init(struct gm204_i2c_aux *aux) AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl); return -EBUSY; } - } while (ctrl & 0x07010000); + } while (ctrl & 0x03010000); /* set some magic, and wait up to 1ms for it to appear */ - nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq); + nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq); timeout = 1000; do { ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50)); @@ -67,7 +67,7 @@ gm204_i2c_aux_init(struct gm204_i2c_aux *aux) gm204_i2c_aux_fini(aux); return -EBUSY; } - } while ((ctrl & 0x07000000) != 
urep); + } while ((ctrl & 0x03000000) != urep); return 0; } diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index bed686279882..8282ae0c4fc3 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -720,7 +720,6 @@ static int omap_dmm_probe(struct platform_device *dev) &omap_dmm->refill_pa, GFP_KERNEL); if (!omap_dmm->refill_va) { dev_err(&dev->dev, "could not allocate refill memory\n"); - ret = -ENOMEM; goto fail; } diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c index 5f757328fced..d34bb4130ff0 100644 --- a/drivers/gpu/drm/qxl/qxl_dumb.c +++ b/drivers/gpu/drm/qxl/qxl_dumb.c @@ -57,8 +57,6 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, surf.height = args->height; surf.stride = pitch; surf.format = format; - surf.data = 0; - r = qxl_gem_object_create_with_handle(qdev, file_priv, QXL_GEM_DOMAIN_VRAM, args->size, &surf, &qobj, diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 845a6d54f45b..8e86cf7da614 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -192,8 +192,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, * so don't register a backlight device */ if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && - (rdev->pdev->device == 0x6741) && - !dmi_match(DMI_PRODUCT_NAME, "iMac12,1")) + (rdev->pdev->device == 0x6741)) return; if (!radeon_encoder->enc_priv) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index c8baa06773df..8e1bf9ed8eff 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev) u32 vblank_time = r600_dpm_get_vblank_time(rdev); u32 switch_limit = pi->mem_gddr5 ? 
450 : 300; + /* disable mclk switching if the refresh is >120Hz, even if the + * blanking period would allow it + */ + if (r600_dpm_get_vrefresh(rdev) > 120) + return true; + /* disable mclk switching if the refresh is >120Hz, even if the * blanking period would allow it */ diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 230e2dcdf053..0c5b3eeff82d 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -2259,10 +2259,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) rdev->pm.default_power_state_index = state_index - 1; rdev->pm.power_state[state_index - 1].default_clock_mode = &rdev->pm.power_state[state_index - 1].clock_info[0]; - rdev->pm.power_state[state_index - 1].flags &= + rdev->pm.power_state[state_index].flags &= ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - rdev->pm.power_state[state_index - 1].misc = 0; - rdev->pm.power_state[state_index - 1].misc2 = 0; + rdev->pm.power_state[state_index].misc = 0; + rdev->pm.power_state[state_index].misc2 = 0; } return state_index; } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 6168ada4e3ad..41caf7da9054 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -503,7 +503,6 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *value = rdev->config.si.backend_enable_mask; } else { DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n"); - return -EINVAL; } break; case RADEON_INFO_MAX_SCLK: diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index eab985fdcfbd..b35ebabd6a9f 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -242,7 +242,7 @@ int radeon_uvd_resume(struct radeon_device *rdev) if (rdev->uvd.vcpu_bo == NULL) return -EINVAL; - memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, 
rdev->uvd_fw->size); + memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); size = radeon_bo_size(rdev->uvd.vcpu_bo); size -= rdev->uvd_fw->size; @@ -250,7 +250,7 @@ int radeon_uvd_resume(struct radeon_device *rdev) ptr = rdev->uvd.cpu_addr; ptr += rdev->uvd_fw->size; - memset_io((void __iomem *)ptr, 0, size); + memset(ptr, 0, size); return 0; } diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index cc811de31afa..0110d95522f3 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -37,7 +37,7 @@ static u8 *udl_get_edid(struct udl_device *udl) ret = usb_control_msg(udl->udev, usb_rcvctrlpipe(udl->udev, 0), (0x02), (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2, - 1000); + HZ); if (ret < 1) { DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); goto error; diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 0479f8718f7a..abf3c3974a49 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -233,7 +233,6 @@ err_ttm: err_vbufs: vgdev->vdev->config->del_vqs(vgdev->vdev); err_vqs: - dev->dev_private = NULL; kfree(vgdev); return ret; } diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c index d1b55c9e9023..883a9810fbf4 100644 --- a/drivers/gpu/msm/adreno_a5xx_preempt.c +++ b/drivers/gpu/msm/adreno_a5xx_preempt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017,2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -407,8 +407,7 @@ unsigned int a5xx_preemption_pre_ibsubmit( /* Enable CP_CONTEXT_SWITCH_YIELD packets in the IB2s */ *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1); - *cmds++ = ((preempt_style == KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER) - ? 
0 : 2); + *cmds++ = 2; return (unsigned int) (cmds - cmds_orig); } diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c index 201e63f5e9dc..2027ac66f737 100644 --- a/drivers/gpu/msm/adreno_debugfs.c +++ b/drivers/gpu/msm/adreno_debugfs.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2008-2017,2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2008-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -281,7 +281,7 @@ static int ctx_print(struct seq_file *s, void *unused) ctx_type_str(drawctxt->type), drawctxt->base.priority, drawctxt->base.proc_priv->comm, - pid_nr(drawctxt->base.proc_priv->pid), + drawctxt->base.proc_priv->pid, drawctxt->base.tid); seq_puts(s, "flags: "); diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c index 265befe31b5e..da9d4e17b7c7 100644 --- a/drivers/gpu/msm/adreno_dispatch.c +++ b/drivers/gpu/msm/adreno_dispatch.c @@ -1696,7 +1696,7 @@ static inline const char *_kgsl_context_comm(struct kgsl_context *context) #define pr_fault(_d, _c, fmt, args...) \ dev_err((_d)->dev, "%s[%d]: " fmt, \ _kgsl_context_comm((_c)->context), \ - pid_nr((_c)->context->proc_priv->pid), ##args) + (_c)->context->proc_priv->pid, ##args) static void adreno_fault_header(struct kgsl_device *device, diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c index 93501d96ef8e..e756c82287fd 100644 --- a/drivers/gpu/msm/adreno_profile.c +++ b/drivers/gpu/msm/adreno_profile.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, 2018,2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2016, 2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -168,7 +168,7 @@ static int _build_pre_ib_cmds(struct adreno_device *adreno_dev, ibcmds += _ib_cmd_mem_write(adreno_dev, ibcmds, gpuaddr + data_offset, drawctxt->base.id, &data_offset); ibcmds += _ib_cmd_mem_write(adreno_dev, ibcmds, gpuaddr + data_offset, - pid_nr(drawctxt->base.proc_priv->pid), &data_offset); + drawctxt->base.proc_priv->pid, &data_offset); ibcmds += _ib_cmd_mem_write(adreno_dev, ibcmds, gpuaddr + data_offset, drawctxt->base.tid, &data_offset); ibcmds += _ib_cmd_mem_write(adreno_dev, ibcmds, gpuaddr + data_offset, diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index d303f73aa9aa..e081e579d142 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2008-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -265,9 +265,7 @@ kgsl_mem_entry_create(void) /* put this ref in the caller functions after init */ kref_get(&entry->refcount); - atomic_set(&entry->map_count, 0); } - return entry; } #ifdef CONFIG_DMA_SHARED_BUFFER @@ -542,7 +540,7 @@ int kgsl_context_init(struct kgsl_device_private *dev_priv, if (atomic_read(&proc_priv->ctxt_count) > KGSL_MAX_CONTEXTS_PER_PROC) { KGSL_DRV_ERR_RATELIMIT(device, "Per process context limit reached for pid %u", - pid_nr(dev_priv->process_priv->pid)); + dev_priv->process_priv->pid); spin_unlock(&proc_priv->ctxt_count_lock); return -ENOSPC; } @@ -867,7 +865,6 @@ static void kgsl_destroy_process_private(struct kref *kref) struct kgsl_process_private *private = container_of(kref, struct kgsl_process_private, refcount); - put_pid(private->pid); idr_destroy(&private->mem_idr); idr_destroy(&private->syncsource_idr); @@ -898,7 +895,7 @@ struct kgsl_process_private *kgsl_process_private_find(pid_t pid) mutex_lock(&kgsl_driver.process_mutex); list_for_each_entry(p, &kgsl_driver.process_list, list) { - if (pid_nr(p->pid) == pid) { + if (p->pid == pid) { if (kgsl_process_private_get(p)) private = p; break; @@ -912,34 +909,25 @@ static struct kgsl_process_private *kgsl_process_private_new( struct kgsl_device *device) { struct kgsl_process_private *private; - struct pid *cur_pid = get_task_pid(current->group_leader, PIDTYPE_PID); + pid_t tgid = task_tgid_nr(current); /* Search in the process list */ list_for_each_entry(private, &kgsl_driver.process_list, list) { - if (private->pid == cur_pid) { - if (!kgsl_process_private_get(private)) { + if (private->pid == tgid) { + if (!kgsl_process_private_get(private)) private = ERR_PTR(-EINVAL); - } - /* - * We need to hold only one reference to the PID for - * each process struct to avoid overflowing the - * reference counter which can lead to use-after-free. 
- */ - put_pid(cur_pid); return private; } } /* Create a new object */ private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL); - if (private == NULL) { - put_pid(cur_pid); + if (private == NULL) return ERR_PTR(-ENOMEM); - } kref_init(&private->refcount); - private->pid = cur_pid; + private->pid = tgid; get_task_comm(private->comm, current->group_leader); spin_lock_init(&private->mem_lock); @@ -950,14 +938,12 @@ static struct kgsl_process_private *kgsl_process_private_new( idr_init(&private->syncsource_idr); /* Allocate a pagetable for the new process object */ - private->pagetable = kgsl_mmu_getpagetable(&device->mmu, - pid_nr(cur_pid)); + private->pagetable = kgsl_mmu_getpagetable(&device->mmu, tgid); if (IS_ERR(private->pagetable)) { int err = PTR_ERR(private->pagetable); idr_destroy(&private->mem_idr); idr_destroy(&private->syncsource_idr); - put_pid(private->pid); kfree(private); private = ERR_PTR(err); @@ -1891,9 +1877,8 @@ static long gpumem_free_entry(struct kgsl_mem_entry *entry) if (entry->memdesc.pagetable != NULL) ptname = entry->memdesc.pagetable->name; - kgsl_memfree_add(pid_nr(entry->priv->pid), ptname, - entry->memdesc.gpuaddr, entry->memdesc.size, - entry->memdesc.flags); + kgsl_memfree_add(entry->priv->pid, ptname, entry->memdesc.gpuaddr, + entry->memdesc.size, entry->memdesc.flags); kgsl_mem_entry_put(entry); @@ -2140,7 +2125,7 @@ static int check_vma(unsigned long hostptr, u64 size) return true; } -static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, unsigned long useraddr) +static int memdesc_sg_virt(struct kgsl_memdesc *memdesc) { int ret = 0; long npages = 0, i; @@ -2162,13 +2147,13 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, unsigned long useraddr) } down_read(¤t->mm->mmap_sem); - if (!check_vma(useraddr, memdesc->size)) { + if (!check_vma(memdesc->useraddr, memdesc->size)) { up_read(¤t->mm->mmap_sem); ret = ~EFAULT; goto out; } - npages = get_user_pages(current, current->mm, useraddr, + npages = 
get_user_pages(current, current->mm, memdesc->useraddr, sglen, write ? FOLL_WRITE : 0, pages, NULL); up_read(¤t->mm->mmap_sem); @@ -2201,34 +2186,29 @@ static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable, { /* Map an anonymous memory chunk */ - int ret; - if (size == 0 || offset != 0 || !IS_ALIGNED(size, PAGE_SIZE)) return -EINVAL; entry->memdesc.pagetable = pagetable; entry->memdesc.size = (uint64_t) size; + entry->memdesc.useraddr = hostptr; entry->memdesc.flags |= KGSL_MEMFLAGS_USERMEM_ADDR; if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) { + int ret; /* Register the address in the database */ ret = kgsl_mmu_set_svm_region(pagetable, - (uint64_t) hostptr, (uint64_t) size); + (uint64_t) entry->memdesc.useraddr, (uint64_t) size); if (ret) return ret; - entry->memdesc.gpuaddr = (uint64_t) hostptr; + entry->memdesc.gpuaddr = (uint64_t) entry->memdesc.useraddr; } - ret = memdesc_sg_virt(&entry->memdesc, hostptr); - - if (ret && kgsl_memdesc_use_cpu_map(&entry->memdesc)) - kgsl_mmu_put_gpuaddr(&entry->memdesc); - - return ret; + return memdesc_sg_virt(&entry->memdesc); } static int match_file(const void *p, struct file *file, unsigned int fd) @@ -2313,8 +2293,8 @@ static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device, return ret; } - /* Setup the cache mode for cache operations */ - + /* Setup the user addr/cache mode for cache operations */ + entry->memdesc.useraddr = hostptr; _setup_cache_mode(entry, vma); up_read(¤t->mm->mmap_sem); return 0; @@ -2448,8 +2428,6 @@ long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv, if (entry == NULL) return -ENOMEM; - spin_lock_init(&entry->memdesc.lock); - param->flags &= KGSL_MEMFLAGS_GPUREADONLY | KGSL_MEMTYPE_MASK | KGSL_MEMALIGN_MASK @@ -2723,8 +2701,6 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv, if (entry == NULL) return -ENOMEM; - spin_lock_init(&entry->memdesc.lock); - /* * Convert from enum value to KGSL_MEM_ENTRY value, so that * we can use the latter 
consistently everywhere. @@ -3336,12 +3312,7 @@ long kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv, param->flags = (unsigned int) entry->memdesc.flags; param->size = (size_t) entry->memdesc.size; param->mmapsize = (size_t) kgsl_memdesc_footprint(&entry->memdesc); - /* - * Entries can have multiple user mappings so thre isn't any one address - * we can report. Plus, the user should already know their mappings, so - * there isn't any value in reporting it back to them. - */ - param->useraddr = 0; + param->useraddr = entry->memdesc.useraddr; kgsl_mem_entry_put(entry); return result; @@ -3505,8 +3476,6 @@ long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv, if (entry == NULL) return -ENOMEM; - spin_lock_init(&entry->memdesc.lock); - entry->memdesc.flags = KGSL_MEMFLAGS_SPARSE_VIRT; entry->memdesc.size = param->size; entry->memdesc.cur_bindings = 0; @@ -3809,6 +3778,9 @@ static int _sparse_bind(struct kgsl_process_private *process, if (memdesc->gpuaddr) return -EINVAL; + if (memdesc->useraddr != 0) + return -EINVAL; + pagetable = memdesc->pagetable; /* Clear out any mappings */ @@ -4088,12 +4060,7 @@ long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv, param->flags = entry->memdesc.flags; param->size = entry->memdesc.size; param->va_len = kgsl_memdesc_footprint(&entry->memdesc); - /* - * Entries can have multiple user mappings so thre isn't any one address - * we can report. Plus, the user should already know their mappings, so - * there isn't any value in reporting it back to them. 
- */ - param->va_addr = 0; + param->va_addr = (uint64_t) entry->memdesc.useraddr; kgsl_mem_entry_put(entry); return 0; @@ -4261,8 +4228,6 @@ static void kgsl_gpumem_vm_open(struct vm_area_struct *vma) if (kgsl_mem_entry_get(entry) == 0) vma->vm_private_data = NULL; - - atomic_inc(&entry->map_count); } static int @@ -4286,8 +4251,7 @@ kgsl_gpumem_vm_close(struct vm_area_struct *vma) if (!entry) return; - atomic_dec(&entry->map_count); - + entry->memdesc.useraddr = 0; kgsl_mem_entry_put(entry); } @@ -4326,8 +4290,7 @@ get_mmap_entry(struct kgsl_process_private *private, } } - /* Don't allow ourselves to remap user memory */ - if (entry->memdesc.flags & KGSL_MEMFLAGS_USERMEM_ADDR) { + if (entry->memdesc.useraddr != 0) { ret = -EBUSY; goto err_put; } @@ -4360,34 +4323,19 @@ static unsigned long _gpu_set_svm_region(struct kgsl_process_private *private, { int ret; - /* - * Protect access to the gpuaddr here to prevent multiple vmas from - * trying to map a SVM region at the same time - */ - spin_lock(&entry->memdesc.lock); - - if (entry->memdesc.gpuaddr) { - spin_unlock(&entry->memdesc.lock); - return (unsigned long) -EBUSY; - } - ret = kgsl_mmu_set_svm_region(private->pagetable, (uint64_t) addr, (uint64_t) size); - if (ret != 0) { - spin_unlock(&entry->memdesc.lock); - return (unsigned long) ret; - } + if (ret != 0) + return ret; entry->memdesc.gpuaddr = (uint64_t) addr; - spin_unlock(&entry->memdesc.lock); - entry->memdesc.pagetable = private->pagetable; ret = kgsl_mmu_map(private->pagetable, &entry->memdesc); if (ret) { kgsl_mmu_put_gpuaddr(&entry->memdesc); - return (unsigned long) ret; + return ret; } kgsl_memfree_purge(private->pagetable, entry->memdesc.gpuaddr, @@ -4450,14 +4398,6 @@ static unsigned long _search_range(struct kgsl_process_private *private, result = _gpu_set_svm_region(private, entry, cpu, len); if (!IS_ERR_VALUE(result)) break; - /* - * _gpu_set_svm_region will return -EBUSY if we tried to set up - * SVM on an object that already has a GPU address. 
If - * that happens don't bother walking the rest of the - * region - */ - if ((long) result == -EBUSY) - return -EBUSY; trace_kgsl_mem_unmapped_area_collision(entry, cpu, len); @@ -4589,15 +4529,13 @@ kgsl_get_unmapped_area(struct file *file, unsigned long addr, if (IS_ERR_VALUE(val)) KGSL_DRV_ERR_RATELIMIT(device, "get_unmapped_area: pid %d addr %lx pgoff %lx len %ld failed error %d\n", - pid_nr(private->pid), addr, - pgoff, len, (int) val); + private->pid, addr, pgoff, len, (int) val); } else { val = _get_svm_area(private, entry, addr, len, flags); if (IS_ERR_VALUE(val)) KGSL_DRV_ERR_RATELIMIT(device, "_get_svm_area: pid %d mmap_base %lx addr %lx pgoff %lx len %ld failed error %d\n", - pid_nr(private->pid), - current->mm->mmap_base, addr, + private->pid, current->mm->mmap_base, addr, pgoff, len, (int) val); } @@ -4674,9 +4612,9 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_file = file; - atomic_inc(&entry->map_count); + entry->memdesc.useraddr = vma->vm_start; - trace_kgsl_mem_mmap(entry, vma->vm_start); + trace_kgsl_mem_mmap(entry); return 0; } diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h index 31257d291c7e..6b8ef82d340f 100644 --- a/drivers/gpu/msm/kgsl.h +++ b/drivers/gpu/msm/kgsl.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2016,2018-2019,2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2008-2016,2018-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -197,9 +197,11 @@ struct kgsl_memdesc_ops { * @pagetable: Pointer to the pagetable that the object is mapped in * @hostptr: Kernel virtual address * @hostptr_count: Number of threads using hostptr + * @useraddr: User virtual address (if applicable) * @gpuaddr: GPU virtual address * @physaddr: Physical address of the memory object * @size: Size of the memory object + * @mapsize: Size of memory mapped in userspace * @priv: Internal flags and settings * @sgt: Scatter gather table for allocated pages * @ops: Function hooks for the memdesc memory type @@ -214,9 +216,11 @@ struct kgsl_memdesc { struct kgsl_pagetable *pagetable; void *hostptr; unsigned int hostptr_count; + unsigned long useraddr; uint64_t gpuaddr; phys_addr_t physaddr; uint64_t size; + uint64_t mapsize; unsigned int priv; struct sg_table *sgt; struct kgsl_memdesc_ops *ops; @@ -226,11 +230,6 @@ struct kgsl_memdesc { struct page **pages; unsigned int page_count; unsigned int cur_bindings; - /* - * @lock: Spinlock to protect the gpuaddr from being accessed by - * multiple entities trying to map the same SVM region at once - */ - spinlock_t lock; }; /* @@ -279,11 +278,6 @@ struct kgsl_mem_entry { struct work_struct work; spinlock_t bind_lock; struct rb_root bind_tree; - /* - * @map_count: Count how many vmas this object is mapped in - used for - * debugfs accounting - */ - atomic_t map_count; }; struct kgsl_device_private; diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c index 36bb4d801df9..592257a332d1 100644 --- a/drivers/gpu/msm/kgsl_debugfs.c +++ b/drivers/gpu/msm/kgsl_debugfs.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2008-2017,2020-2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2008-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -149,11 +149,7 @@ static int print_mem_entry(void *data, void *ptr) flags[3] = get_alignflag(m); flags[4] = get_cacheflag(m); flags[5] = kgsl_memdesc_use_cpu_map(m) ? 'p' : '-'; - /* - * Show Y if at least one vma has this entry - * mapped (could be multiple) - */ - flags[6] = atomic_read(&entry->map_count) ? 'Y' : 'N'; + flags[6] = (m->useraddr) ? 'Y' : 'N'; flags[7] = kgsl_memdesc_is_secured(m) ? 's' : '-'; flags[8] = m->flags & KGSL_MEMFLAGS_SPARSE_PHYS ? 'P' : '-'; flags[9] = '\0'; @@ -164,16 +160,12 @@ static int print_mem_entry(void *data, void *ptr) kgsl_get_egl_counts(entry, &egl_surface_count, &egl_image_count); - seq_printf(s, "%pK %d %16llu %5d %9s %10s %16s %5d %16d %6d %6d", + seq_printf(s, "%pK %pK %16llu %5d %9s %10s %16s %5d %16llu %6d %6d", (uint64_t *)(uintptr_t) m->gpuaddr, - /* - * Show zero for the useraddr - we can't reliably track - * that value for multiple vmas anyway - */ - 0, m->size, entry->id, flags, + (unsigned long *) m->useraddr, + m->size, entry->id, flags, memtype_str(usermem_type), - usage, (m->sgt ? m->sgt->nents : 0), - atomic_read(&entry->map_count), + usage, (m->sgt ? 
m->sgt->nents : 0), m->mapsize, egl_surface_count, egl_image_count); if (entry->metadata[0] != 0) @@ -243,7 +235,7 @@ static int process_mem_seq_show(struct seq_file *s, void *ptr) if (ptr == SEQ_START_TOKEN) { seq_printf(s, "%16s %16s %16s %5s %9s %10s %16s %5s %16s %6s %6s\n", "gpuaddr", "useraddr", "size", "id", "flags", "type", - "usage", "sglen", "mapcount", "eglsrf", "eglimg"); + "usage", "sglen", "mapsize", "eglsrf", "eglimg"); return 0; } else return print_mem_entry(s, ptr); @@ -401,7 +393,7 @@ void kgsl_process_init_debugfs(struct kgsl_process_private *private) unsigned char name[16]; struct dentry *dentry; - snprintf(name, sizeof(name), "%d", pid_nr(private->pid)); + snprintf(name, sizeof(name), "%d", private->pid); private->debug_root = debugfs_create_dir(name, proc_d_debugfs); @@ -421,15 +413,14 @@ void kgsl_process_init_debugfs(struct kgsl_process_private *private) } dentry = debugfs_create_file("mem", 0444, private->debug_root, - (void *) ((unsigned long) pid_nr(private->pid)), - &process_mem_fops); + (void *) ((unsigned long) private->pid), &process_mem_fops); if (IS_ERR_OR_NULL(dentry)) WARN((dentry == NULL), "Unable to create 'mem' file for %s\n", name); dentry = debugfs_create_file("sparse_mem", 0444, private->debug_root, - (void *) ((unsigned long) pid_nr(private->pid)), + (void *) ((unsigned long) private->pid), &process_sparse_mem_fops); if (IS_ERR_OR_NULL(dentry)) diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index 14852326116c..0ab6041b3bbd 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -405,13 +405,13 @@ struct kgsl_context { #define pr_context(_d, _c, fmt, args...) 
\ dev_err((_d)->dev, "%s[%d]: " fmt, \ _context_comm((_c)), \ - pid_nr((_c)->proc_priv->pid), ##args) + (_c)->proc_priv->pid, ##args) /** * struct kgsl_process_private - Private structure for a KGSL process (across * all devices) * @priv: Internal flags, use KGSL_PROCESS_* values - * @pid: Identification structure for the task owner of the process + * @pid: ID for the task owner of the process * @comm: task name of the process * @mem_lock: Spinlock to protect the process memory lists * @refcount: kref object for reference counting the process @@ -428,7 +428,7 @@ struct kgsl_context { */ struct kgsl_process_private { unsigned long priv; - struct pid *pid; + pid_t pid; char comm[TASK_COMM_LEN]; spinlock_t mem_lock; struct kref refcount; diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c index bc11111bb74f..9ba15b61af00 100644 --- a/drivers/gpu/msm/kgsl_drawobj.c +++ b/drivers/gpu/msm/kgsl_drawobj.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017,2019,2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017,2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -585,7 +585,6 @@ static void add_profiling_buffer(struct kgsl_device *device, { struct kgsl_mem_entry *entry; struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); - u64 start; if (!(drawobj->flags & KGSL_DRAWOBJ_PROFILING)) return; @@ -602,14 +601,7 @@ static void add_profiling_buffer(struct kgsl_device *device, gpuaddr); if (entry != NULL) { - start = id ? 
(entry->memdesc.gpuaddr + offset) : gpuaddr; - /* - * Make sure there is enough room in the object to store the - * entire profiling buffer object - */ - if (!kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size) || - !kgsl_gpuaddr_in_memdesc(&entry->memdesc, start, - sizeof(struct kgsl_drawobj_profiling_buffer))) { + if (!kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size)) { kgsl_mem_entry_put(entry); entry = NULL; } @@ -622,7 +614,28 @@ static void add_profiling_buffer(struct kgsl_device *device, return; } - cmdobj->profiling_buffer_gpuaddr = start; + + if (!id) { + cmdobj->profiling_buffer_gpuaddr = gpuaddr; + } else { + u64 off = offset + sizeof(struct kgsl_drawobj_profiling_buffer); + + /* + * Make sure there is enough room in the object to store the + * entire profiling buffer object + */ + if (off < offset || off >= entry->memdesc.size) { + dev_err(device->dev, + "ignore invalid profile offset ctxt %d id %d offset %lld gpuaddr %llx size %lld\n", + drawobj->context->id, id, offset, gpuaddr, size); + kgsl_mem_entry_put(entry); + return; + } + + cmdobj->profiling_buffer_gpuaddr = + entry->memdesc.gpuaddr + offset; + } + cmdobj->profiling_buf_entry = entry; } diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index 98537730cbc9..f6ff4658c93b 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -199,9 +199,8 @@ static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu, static void kgsl_iommu_add_global(struct kgsl_mmu *mmu, struct kgsl_memdesc *memdesc, const char *name) { - u32 bit; + u32 bit, start = 0; u64 size = kgsl_memdesc_footprint(memdesc); - int start = 0; if (memdesc->gpuaddr != 0) return; @@ -271,7 +270,6 @@ static void kgsl_setup_qdss_desc(struct kgsl_device *device) return; } - spin_lock_init(&gpu_qdss_desc.lock); gpu_qdss_desc.flags = 0; gpu_qdss_desc.priv = 0; gpu_qdss_desc.physaddr = gpu_qdss_entry[0]; @@ -317,7 +315,6 @@ static void kgsl_setup_qtimer_desc(struct kgsl_device *device) return; } - spin_lock_init(&gpu_qtimer_desc.lock); gpu_qtimer_desc.flags = 0; gpu_qtimer_desc.priv = 0; gpu_qtimer_desc.physaddr = gpu_qtimer_entry[0]; @@ -695,7 +692,7 @@ static void _get_entries(struct kgsl_process_private *private, prev->flags = p->memdesc.flags; prev->priv = p->memdesc.priv; prev->pending_free = p->pending_free; - prev->pid = pid_nr(private->pid); + prev->pid = private->pid; __kgsl_get_memory_usage(prev); } @@ -705,7 +702,7 @@ static void _get_entries(struct kgsl_process_private *private, next->flags = n->memdesc.flags; next->priv = n->memdesc.priv; next->pending_free = n->pending_free; - next->pid = pid_nr(private->pid); + next->pid = private->pid; __kgsl_get_memory_usage(next); } } @@ -1502,7 +1499,6 @@ static int _setstate_alloc(struct kgsl_device *device, { int ret; - spin_lock_init(&iommu->setstate.lock); ret = kgsl_sharedmem_alloc_contig(device, &iommu->setstate, PAGE_SIZE); if (!ret) { @@ -2481,11 +2477,6 @@ static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable, goto out; } - /* - * This path is only called in a non-SVM path with locks so we can be - * sure we aren't racing with anybody so we don't need to worry about - * taking the lock - */ ret = _insert_gpuaddr(pagetable, 
addr, size); if (ret == 0) { memdesc->gpuaddr = addr; diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c index d0a45e713d0d..aa7157e882ac 100644 --- a/drivers/gpu/msm/kgsl_mmu.c +++ b/drivers/gpu/msm/kgsl_mmu.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2017,2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -432,8 +432,7 @@ void kgsl_mmu_put_gpuaddr(struct kgsl_memdesc *memdesc) if (memdesc->size == 0 || memdesc->gpuaddr == 0) return; - if (!kgsl_memdesc_is_global(memdesc) && - (KGSL_MEMDESC_MAPPED & memdesc->priv)) + if (!kgsl_memdesc_is_global(memdesc)) unmap_fail = kgsl_mmu_unmap(pagetable, memdesc); /* @@ -444,16 +443,10 @@ void kgsl_mmu_put_gpuaddr(struct kgsl_memdesc *memdesc) if (PT_OP_VALID(pagetable, put_gpuaddr) && (unmap_fail == 0)) pagetable->pt_ops->put_gpuaddr(memdesc); - memdesc->pagetable = NULL; - - /* - * If SVM tries to take a GPU address it will lose the race until the - * gpuaddr returns to zero so we shouldn't need to worry about taking a - * lock here - */ if (!kgsl_memdesc_is_global(memdesc)) memdesc->gpuaddr = 0; + memdesc->pagetable = NULL; } EXPORT_SYMBOL(kgsl_mmu_put_gpuaddr); diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c index dfbea53c306b..4c54553e9977 100644 --- a/drivers/gpu/msm/kgsl_sharedmem.c +++ b/drivers/gpu/msm/kgsl_sharedmem.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2017,2020-2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2017 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -202,7 +202,7 @@ void kgsl_process_init_sysfs(struct kgsl_device *device, /* Keep private valid until the sysfs enries are removed. */ kgsl_process_private_get(private); - snprintf(name, sizeof(name), "%d", pid_nr(private->pid)); + snprintf(name, sizeof(name), "%d", private->pid); if (kobject_init_and_add(&private->kobj, &ktype_mem_entry, kgsl_driver.prockobj, name)) { @@ -342,7 +342,6 @@ int kgsl_allocate_user(struct kgsl_device *device, int ret; memdesc->flags = flags; - spin_lock_init(&memdesc->lock); if (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE) ret = kgsl_sharedmem_alloc_contig(device, memdesc, size); @@ -374,6 +373,8 @@ static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc, get_page(page); vmf->page = page; + memdesc->mapsize += PAGE_SIZE; + return 0; } @@ -503,6 +504,8 @@ static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc, else if (ret == -EFAULT) return VM_FAULT_SIGBUS; + memdesc->mapsize += PAGE_SIZE; + return VM_FAULT_NOPAGE; } diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h index ee89cfb808d7..e5da594b77b8 100644 --- a/drivers/gpu/msm/kgsl_sharedmem.h +++ b/drivers/gpu/msm/kgsl_sharedmem.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2017,2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -283,7 +283,6 @@ static inline int kgsl_allocate_global(struct kgsl_device *device, memdesc->flags = flags; memdesc->priv = priv; - spin_lock_init(&memdesc->lock); if (((memdesc->priv & KGSL_MEMDESC_CONTIG) != 0) || (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE)) diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h index 5f325729f71e..6438c6e65b97 100644 --- a/drivers/gpu/msm/kgsl_trace.h +++ b/drivers/gpu/msm/kgsl_trace.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2016,2020-2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -417,7 +417,7 @@ TRACE_EVENT(kgsl_mem_alloc, TP_fast_assign( __entry->gpuaddr = mem_entry->memdesc.gpuaddr; __entry->size = mem_entry->memdesc.size; - __entry->tgid = pid_nr(mem_entry->priv->pid); + __entry->tgid = mem_entry->priv->pid; kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage), mem_entry->memdesc.flags); __entry->id = mem_entry->id; @@ -433,9 +433,9 @@ TRACE_EVENT(kgsl_mem_alloc, TRACE_EVENT(kgsl_mem_mmap, - TP_PROTO(struct kgsl_mem_entry *mem_entry, unsigned long useraddr), + TP_PROTO(struct kgsl_mem_entry *mem_entry), - TP_ARGS(mem_entry, useraddr), + TP_ARGS(mem_entry), TP_STRUCT__entry( __field(unsigned long, useraddr) @@ -447,7 +447,7 @@ TRACE_EVENT(kgsl_mem_mmap, ), TP_fast_assign( - __entry->useraddr = useraddr; + __entry->useraddr = mem_entry->memdesc.useraddr; __entry->gpuaddr = mem_entry->memdesc.gpuaddr; __entry->size = mem_entry->memdesc.size; kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage), @@ -510,7 +510,7 @@ TRACE_EVENT(kgsl_mem_map, __entry->size = mem_entry->memdesc.size; __entry->fd = fd; __entry->type = 
kgsl_memdesc_usermem_type(&mem_entry->memdesc); - __entry->tgid = pid_nr(mem_entry->priv->pid); + __entry->tgid = mem_entry->priv->pid; kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage), mem_entry->memdesc.flags); __entry->id = mem_entry->id; @@ -545,7 +545,7 @@ TRACE_EVENT(kgsl_mem_free, __entry->gpuaddr = mem_entry->memdesc.gpuaddr; __entry->size = mem_entry->memdesc.size; __entry->type = kgsl_memdesc_usermem_type(&mem_entry->memdesc); - __entry->tgid = pid_nr(mem_entry->priv->pid); + __entry->tgid = mem_entry->priv->pid; kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage), mem_entry->memdesc.flags); __entry->id = mem_entry->id; @@ -580,7 +580,7 @@ TRACE_EVENT(kgsl_mem_sync_cache, __entry->gpuaddr = mem_entry->memdesc.gpuaddr; kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage), mem_entry->memdesc.flags); - __entry->tgid = pid_nr(mem_entry->priv->pid); + __entry->tgid = mem_entry->priv->pid; __entry->id = mem_entry->id; __entry->op = op; __entry->offset = offset; diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index f9310faf9c90..c5d72a3f1e49 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -172,14 +172,14 @@ config HID_CHERRY config HID_CHICONY tristate "Chicony devices" - depends on USB_HID + depends on HID default !EXPERT ---help--- Support for Chicony Tactical pad and special keys on Chicony keyboards. config HID_CORSAIR tristate "Corsair devices" - depends on USB_HID && LEDS_CLASS + depends on HID && USB && LEDS_CLASS ---help--- Support for Corsair devices that are not fully compliant with the HID standard. @@ -189,7 +189,7 @@ config HID_CORSAIR config HID_PRODIKEYS tristate "Prodikeys PC-MIDI Keyboard support" - depends on USB_HID && SND + depends on HID && SND select SND_RAWMIDI ---help--- Support for Prodikeys PC-MIDI Keyboard device support. 
@@ -395,7 +395,7 @@ config HID_LENOVO config HID_LOGITECH tristate "Logitech devices" - depends on USB_HID + depends on HID default !EXPERT ---help--- Support for Logitech devices that are not fully compliant with HID standard. @@ -710,7 +710,7 @@ config HID_SAITEK config HID_SAMSUNG tristate "Samsung InfraRed remote control or keyboards" - depends on USB_HID + depends on HID ---help--- Support for Samsung InfraRed remote control or keyboards. @@ -852,7 +852,7 @@ config THRUSTMASTER_FF config HID_WACOM tristate "Wacom Intuos/Graphire tablet support (USB)" - depends on USB_HID + depends on HID select POWER_SUPPLY select NEW_LEDS select LEDS_CLASS diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index d934d17f7a48..51da6aa8d7e8 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -59,7 +59,7 @@ obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o -obj-$(CONFIG_HID_NINTENDO) += hid-nintendo.o +obj-$(CONFIG_HID_NINTENDO) += hid-nintendo.o obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o obj-$(CONFIG_HID_ORTEK) += hid-ortek.o obj-$(CONFIG_HID_PRODIKEYS) += hid-prodikeys.o diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 8ced12c57cce..ec4250061922 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -301,19 +301,12 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field, /* * MacBook JIS keyboard has wrong logical maximum - * Magic Keyboard JIS has wrong logical maximum */ static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct apple_sc *asc = hid_get_drvdata(hdev); - if(*rsize >=71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) { - hid_info(hdev, - "fixing up Magic Keyboard JIS report descriptor\n"); - rdesc[64] = rdesc[70] = 0xe7; - } - if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 && rdesc[53] == 0x65 && rdesc[59] == 0x65) { 
hid_info(hdev, diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c index 9b60efe6ec44..69cfc8dc6af1 100644 --- a/drivers/hid/hid-betopff.c +++ b/drivers/hid/hid-betopff.c @@ -59,22 +59,15 @@ static int betopff_init(struct hid_device *hid) { struct betopff_device *betopff; struct hid_report *report; - struct hid_input *hidinput; + struct hid_input *hidinput = + list_first_entry(&hid->inputs, struct hid_input, list); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; - struct input_dev *dev; + struct input_dev *dev = hidinput->input; int field_count = 0; int error; int i, j; - if (list_empty(&hid->inputs)) { - hid_err(hid, "no inputs found\n"); - return -ENODEV; - } - - hidinput = list_first_entry(&hid->inputs, struct hid_input, list); - dev = hidinput->input; - if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c index f11948ddf642..f04ed9aabc3f 100644 --- a/drivers/hid/hid-chicony.c +++ b/drivers/hid/hid-chicony.c @@ -61,12 +61,8 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi, static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - struct usb_interface *intf; - - if (!hid_is_usb(hdev)) - return rdesc; - - intf = to_usb_interface(hdev->dev.parent); + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { /* Change usage maximum and logical maximum from 0x7fff to * 0x2fff, so they don't exceed HID_MAX_USAGES */ diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index a4253d540bb7..ee0de3a2d6cb 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -91,7 +91,7 @@ EXPORT_SYMBOL_GPL(hid_register_report); * Register a new field for this report. 
*/ -static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages) +static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values) { struct hid_field *field; @@ -102,7 +102,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned field = kzalloc((sizeof(struct hid_field) + usages * sizeof(struct hid_usage) + - usages * sizeof(unsigned)), GFP_KERNEL); + values * sizeof(unsigned)), GFP_KERNEL); if (!field) return NULL; @@ -289,7 +289,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign usages = max_t(unsigned, parser->local.usage_index, parser->global.report_count); - field = hid_register_field(report, usages); + field = hid_register_field(report, usages, parser->global.report_count); if (!field) return 0; @@ -1124,9 +1124,6 @@ EXPORT_SYMBOL_GPL(hid_open_report); static s32 snto32(__u32 value, unsigned n) { - if (!value || !n) - return 0; - switch (n) { case 8: return ((__s8)value); case 16: return ((__s16)value); @@ -1780,9 +1777,6 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) case BUS_I2C: bus = "I2C"; break; - case BUS_VIRTUAL: - bus = "VIRTUAL"; - break; default: bus = ""; } @@ -2089,16 +2083,6 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) }, #endif -#if IS_ENABLED(CONFIG_HID_NINTENDO) - { HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO, - USB_DEVICE_ID_NINTENDO_PROCON) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, - USB_DEVICE_ID_NINTENDO_PROCON) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, - USB_DEVICE_ID_NINTENDO_JOYCONL) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, - USB_DEVICE_ID_NINTENDO_JOYCONR) }, -#endif #if IS_ENABLED(CONFIG_HID_SAITEK) { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) }, { 
HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) }, @@ -2121,7 +2105,6 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) }, @@ -2153,6 +2136,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, { HID_USB_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_VALVE, USB_DEVICE_ID_STEAM_CONTROLLER_BT) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c index 0f429034f205..88be56321610 100644 --- a/drivers/hid/hid-corsair.c +++ b/drivers/hid/hid-corsair.c @@ -551,12 +551,7 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id) int ret; unsigned long quirks = id->driver_data; struct corsair_drvdata *drvdata; - struct usb_interface *usbif; - - if (!hid_is_usb(dev)) - return -EINVAL; - - usbif = to_usb_interface(dev->dev.parent); + struct usb_interface *usbif = to_usb_interface(dev->dev.parent); drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata), GFP_KERNEL); diff --git 
a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 07dea71d41fc..3cafa1d28fed 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -833,9 +833,7 @@ static const char *keys[KEY_MAX + 1] = { [KEY_F22] = "F22", [KEY_F23] = "F23", [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD", [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3", - [KEY_PROG4] = "Prog4", - [KEY_ALL_APPLICATIONS] = "AllApplications", - [KEY_SUSPEND] = "Suspend", + [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend", [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play", [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost", [KEY_PRINT] = "Print", [KEY_HP] = "HP", diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index c3ecac13e620..5eea6fe0d7bd 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c @@ -230,9 +230,6 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) struct elo_priv *priv; int ret; - if (!hid_is_usb(hdev)) - return -EINVAL; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; diff --git a/drivers/hid/hid-gt683r.c b/drivers/hid/hid-gt683r.c index 2991957bbb7f..0d6f135e266c 100644 --- a/drivers/hid/hid-gt683r.c +++ b/drivers/hid/hid-gt683r.c @@ -64,7 +64,6 @@ static const struct hid_device_id gt683r_led_id[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, { } }; -MODULE_DEVICE_TABLE(hid, gt683r_led_id); static void gt683r_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c index 2f8eb6639744..ab9da597106f 100644 --- a/drivers/hid/hid-holtek-kbd.c +++ b/drivers/hid/hid-holtek-kbd.c @@ -143,17 +143,12 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type, static int holtek_kbd_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct usb_interface *intf; - int ret; - - if (!hid_is_usb(hdev)) - return -EINVAL; + struct usb_interface *intf = 
to_usb_interface(hdev->dev.parent); + int ret = hid_parse(hdev); - ret = hid_parse(hdev); if (!ret) ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); - intf = to_usb_interface(hdev->dev.parent); if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) { struct hid_input *hidinput; list_for_each_entry(hidinput, &hdev->inputs, list) { diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c index 96db7e96fcea..78b3a0c76775 100644 --- a/drivers/hid/hid-holtek-mouse.c +++ b/drivers/hid/hid-holtek-mouse.c @@ -65,29 +65,6 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, return rdesc; } -static int holtek_mouse_probe(struct hid_device *hdev, - const struct hid_device_id *id) -{ - int ret; - - if (!hid_is_usb(hdev)) - return -EINVAL; - - ret = hid_parse(hdev); - if (ret) { - hid_err(hdev, "hid parse failed: %d\n", ret); - return ret; - } - - ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); - if (ret) { - hid_err(hdev, "hw start failed: %d\n", ret); - return ret; - } - - return 0; -} - static const struct hid_device_id holtek_mouse_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) }, @@ -109,7 +86,6 @@ static struct hid_driver holtek_mouse_driver = { .name = "holtek_mouse", .id_table = holtek_mouse_devices, .report_fixup = holtek_mouse_report_fixup, - .probe = holtek_mouse_probe, }; module_hid_driver(holtek_mouse_driver); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 8dc7730d98f1..0194cc2d8e07 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -773,7 +773,6 @@ #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 #define USB_VENDOR_ID_PLANTRONICS 0x047f -#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056 #define USB_VENDOR_ID_PANASONIC 0x04da #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044 @@ -911,6 +910,7 @@ #define USB_VENDOR_ID_VALVE 0x28de #define USB_DEVICE_ID_STEAM_CONTROLLER 0x1102 #define USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS 0x1142 
+#define USB_DEVICE_ID_STEAM_CONTROLLER_BT 0x1106 #define USB_VENDOR_ID_STEELSERIES 0x1038 #define USB_DEVICE_ID_STEELSERIES_SRWS1 0x1410 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index da29234a5e3f..c033d12070c3 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -303,7 +303,6 @@ static enum power_supply_property hidinput_battery_props[] = { #define HID_BATTERY_QUIRK_PERCENT (1 << 0) /* always reports percent */ #define HID_BATTERY_QUIRK_FEATURE (1 << 1) /* ask for feature report */ -#define HID_BATTERY_QUIRK_IGNORE (1 << 2) /* completely ignore the battery */ static const struct hid_device_id hid_battery_quirks[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, @@ -321,9 +320,6 @@ static const struct hid_device_id hid_battery_quirks[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, - USB_DEVICE_ID_ELECOM_BM084), - HID_BATTERY_QUIRK_IGNORE }, {} }; @@ -339,45 +335,13 @@ static unsigned find_battery_quirk(struct hid_device *hdev) return quirks; } -static int hidinput_scale_battery_capacity(struct hid_device *dev, - int value) -{ - if (dev->battery_min < dev->battery_max && - value >= dev->battery_min && value <= dev->battery_max) - value = ((value - dev->battery_min) * 100) / - (dev->battery_max - dev->battery_min); - - return value; -} - -static int hidinput_query_battery_capacity(struct hid_device *dev) -{ - u8 *buf; - int ret; - - buf = kmalloc(2, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2, - dev->battery_report_type, HID_REQ_GET_REPORT); - if (ret != 2) { - kfree(buf); - return -ENODATA; - } - - ret = hidinput_scale_battery_capacity(dev, buf[1]); - kfree(buf); - return ret; -} - static int hidinput_get_battery_property(struct power_supply *psy, enum power_supply_property prop, union power_supply_propval *val) { struct 
hid_device *dev = power_supply_get_drvdata(psy); - int value; int ret = 0; + __u8 *buf; switch (prop) { case POWER_SUPPLY_PROP_PRESENT: @@ -386,15 +350,29 @@ static int hidinput_get_battery_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CAPACITY: - if (dev->battery_report_type == HID_FEATURE_REPORT) { - value = hidinput_query_battery_capacity(dev); - if (value < 0) - return value; - } else { - value = dev->battery_capacity; + + buf = kmalloc(2 * sizeof(__u8), GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + break; } + ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2, + dev->battery_report_type, + HID_REQ_GET_REPORT); - val->intval = value; + if (ret != 2) { + ret = -ENODATA; + kfree(buf); + break; + } + ret = 0; + + if (dev->battery_min < dev->battery_max && + buf[1] >= dev->battery_min && + buf[1] <= dev->battery_max) + val->intval = (100 * (buf[1] - dev->battery_min)) / + (dev->battery_max - dev->battery_min); + kfree(buf); break; case POWER_SUPPLY_PROP_MODEL_NAME: @@ -402,22 +380,7 @@ static int hidinput_get_battery_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_STATUS: - if (!dev->battery_reported && - dev->battery_report_type == HID_FEATURE_REPORT) { - value = hidinput_query_battery_capacity(dev); - if (value < 0) - return value; - - dev->battery_capacity = value; - dev->battery_reported = true; - } - - if (!dev->battery_reported) - val->intval = POWER_SUPPLY_STATUS_UNKNOWN; - else if (dev->battery_capacity == 100) - val->intval = POWER_SUPPLY_STATUS_FULL; - else - val->intval = POWER_SUPPLY_STATUS_DISCHARGING; + val->intval = POWER_SUPPLY_STATUS_DISCHARGING; break; case POWER_SUPPLY_PROP_SCOPE: @@ -432,33 +395,27 @@ static int hidinput_get_battery_property(struct power_supply *psy, return ret; } -static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, struct hid_field *field) +static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type, struct hid_field *field) { - struct 
power_supply_desc *psy_desc; + struct power_supply_desc *psy_desc = NULL; struct power_supply_config psy_cfg = { .drv_data = dev, }; unsigned quirks; s32 min, max; - int error; - - if (dev->battery) - return 0; /* already initialized? */ - - quirks = find_battery_quirk(dev); - hid_dbg(dev, "device %x:%x:%x %d quirks %d\n", - dev->bus, dev->vendor, dev->product, dev->version, quirks); + if (field->usage->hid != HID_DC_BATTERYSTRENGTH) + return false; /* no match */ - if (quirks & HID_BATTERY_QUIRK_IGNORE) - return 0; + if (dev->battery != NULL) + goto out; /* already initialized? */ psy_desc = kzalloc(sizeof(*psy_desc), GFP_KERNEL); - if (!psy_desc) - return -ENOMEM; + if (psy_desc == NULL) + goto out; psy_desc->name = kasprintf(GFP_KERNEL, "hid-%s-battery", dev->uniq); - if (!psy_desc->name) { - error = -ENOMEM; - goto err_free_mem; + if (psy_desc->name == NULL) { + kfree(psy_desc); + goto out; } psy_desc->type = POWER_SUPPLY_TYPE_BATTERY; @@ -467,6 +424,11 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, psy_desc->use_for_apm = 0; psy_desc->get_property = hidinput_get_battery_property; + quirks = find_battery_quirk(dev); + + hid_dbg(dev, "device %x:%x:%x %d quirks %d\n", + dev->bus, dev->vendor, dev->product, dev->version, quirks); + min = field->logical_minimum; max = field->logical_maximum; @@ -485,20 +447,17 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); if (IS_ERR(dev->battery)) { - error = PTR_ERR(dev->battery); - hid_warn(dev, "can't register power supply: %d\n", error); - goto err_free_name; + hid_warn(dev, "can't register power supply: %ld\n", + PTR_ERR(dev->battery)); + kfree(psy_desc->name); + kfree(psy_desc); + dev->battery = NULL; + } else { + power_supply_powers(dev->battery, &dev->dev); } - power_supply_powers(dev->battery, &dev->dev); - return 0; - -err_free_name: - kfree(psy_desc->name); -err_free_mem: - 
kfree(psy_desc); - dev->battery = NULL; - return error; +out: + return true; } static void hidinput_cleanup_battery(struct hid_device *dev) @@ -514,33 +473,16 @@ static void hidinput_cleanup_battery(struct hid_device *dev) kfree(psy_desc); dev->battery = NULL; } - -static void hidinput_update_battery(struct hid_device *dev, int value) -{ - if (!dev->battery) - return; - - if (value == 0 || value < dev->battery_min || value > dev->battery_max) - return; - - dev->battery_capacity = hidinput_scale_battery_capacity(dev, value); - dev->battery_reported = true; - power_supply_changed(dev->battery); -} #else /* !CONFIG_HID_BATTERY_STRENGTH */ -static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, - struct hid_field *field) +static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type, + struct hid_field *field) { - return 0; + return false; } static void hidinput_cleanup_battery(struct hid_device *dev) { } - -static void hidinput_update_battery(struct hid_device *dev, int value) -{ -} #endif /* CONFIG_HID_BATTERY_STRENGTH */ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_field *field, @@ -742,11 +684,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } break; - case 0x3b: /* Battery Strength */ - hidinput_setup_battery(device, HID_INPUT_REPORT, field); - usage->type = EV_PWR; - goto ignore; - case 0x3c: /* Invert */ map_key_clear(BTN_TOOL_RUBBER); break; @@ -973,8 +910,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x28b: map_key_clear(KEY_FORWARDMAIL); break; case 0x28c: map_key_clear(KEY_SEND); break; - case 0x2a2: map_key_clear(KEY_ALL_APPLICATIONS); break; - case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break; case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break; case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break; @@ -989,13 +924,11 @@ static void hidinput_configure_usage(struct hid_input 
*hidinput, struct hid_fiel break; case HID_UP_GENDEVCTRLS: - switch (usage->hid) { - case HID_DC_BATTERYSTRENGTH: - hidinput_setup_battery(device, HID_INPUT_REPORT, field); - usage->type = EV_PWR; + if (hidinput_setup_battery(device, HID_INPUT_REPORT, field)) goto ignore; - } - goto unknown; + else + goto unknown; + break; case HID_UP_HPVENDOR: /* Reported on a Dutch layout HP5308 */ set_bit(EV_REP, input->evbit); @@ -1083,6 +1016,7 @@ mapped: if (usage->code > max) goto ignore; + if (usage->type == EV_ABS) { int a = field->logical_minimum; @@ -1143,19 +1077,14 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct struct input_dev *input; unsigned *quirks = &hid->quirks; - if (!usage->type) - return; - - if (usage->type == EV_PWR) { - hidinput_update_battery(hid, value); - return; - } - if (!field->hidinput) return; input = field->hidinput->input; + if (!usage->type) + return; + if (usage->hat_min < usage->hat_max || usage->hat_dir) { int hat_dir = usage->hat_dir; if (!hat_dir) @@ -1432,7 +1361,6 @@ static void report_features(struct hid_device *hid) struct hid_driver *drv = hid->driver; struct hid_report_enum *rep_enum; struct hid_report *rep; - struct hid_usage *usage; int i, j; rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; @@ -1443,15 +1371,12 @@ static void report_features(struct hid_device *hid) continue; for (j = 0; j < rep->field[i]->maxusage; j++) { - usage = &rep->field[i]->usage[j]; - /* Verify if Battery Strength feature is available */ - if (usage->hid == HID_DC_BATTERYSTRENGTH) - hidinput_setup_battery(hid, HID_FEATURE_REPORT, - rep->field[i]); + hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]); if (drv->feature_mapping) - drv->feature_mapping(hid, rep->field[i], usage); + drv->feature_mapping(hid, rep->field[i], + rep->field[i]->usage + j); } } } diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index c77e2303ba3e..0fd9fc135f3d 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -659,18 
+659,12 @@ static int lg_event(struct hid_device *hdev, struct hid_field *field, static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct usb_interface *iface; - __u8 iface_num; + struct usb_interface *iface = to_usb_interface(hdev->dev.parent); + __u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber; unsigned int connect_mask = HID_CONNECT_DEFAULT; struct lg_drv_data *drv_data; int ret; - if (!hid_is_usb(hdev)) - return -EINVAL; - - iface = to_usb_interface(hdev->dev.parent); - iface_num = iface->cur_altsetting->desc.bInterfaceNumber; - /* G29 only work with the 1st interface */ if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) && (iface_num != 0)) { diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c index 460711c1124a..584b10d3fc3d 100644 --- a/drivers/hid/hid-plantronics.c +++ b/drivers/hid/hid-plantronics.c @@ -16,7 +16,6 @@ #include #include -#include #define PLT_HID_1_0_PAGE 0xffa00000 #define PLT_HID_2_0_PAGE 0xffa20000 @@ -40,16 +39,6 @@ #define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \ (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) -#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0) - -#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */ - -struct plt_drv_data { - unsigned long device_type; - unsigned long last_volume_key_ts; - u32 quirks; -}; - static int plantronics_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, @@ -57,8 +46,7 @@ static int plantronics_input_mapping(struct hid_device *hdev, unsigned long **bit, int *max) { unsigned short mapped_key; - struct plt_drv_data *drv_data = hid_get_drvdata(hdev); - unsigned long plt_type = drv_data->device_type; + unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev); /* special case for PTT products */ if (field->application == HID_GD_JOYSTICK) @@ -120,30 +108,6 @@ mapped: return 1; } -static int plantronics_event(struct hid_device *hdev, struct hid_field *field, - struct hid_usage 
*usage, __s32 value) -{ - struct plt_drv_data *drv_data = hid_get_drvdata(hdev); - - if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) { - unsigned long prev_ts, cur_ts; - - /* Usages are filtered in plantronics_usages. */ - - if (!value) /* Handle key presses only. */ - return 0; - - prev_ts = drv_data->last_volume_key_ts; - cur_ts = jiffies; - if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT) - return 1; /* Ignore the repeated key. */ - - drv_data->last_volume_key_ts = cur_ts; - } - - return 0; -} - static unsigned long plantronics_device_type(struct hid_device *hdev) { unsigned i, col_page; @@ -172,24 +136,15 @@ exit: static int plantronics_probe(struct hid_device *hdev, const struct hid_device_id *id) { - struct plt_drv_data *drv_data; int ret; - drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL); - if (!drv_data) - return -ENOMEM; - ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err; } - drv_data->device_type = plantronics_device_type(hdev); - drv_data->quirks = id->driver_data; - drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT); - - hid_set_drvdata(hdev, drv_data); + hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev)); ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT | HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE); @@ -201,26 +156,15 @@ err: } static const struct hid_device_id plantronics_devices[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, - USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES), - .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, { } }; MODULE_DEVICE_TABLE(hid, plantronics_devices); -static const struct hid_usage_id plantronics_usages[] = { - { HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID }, - { HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID }, - { HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR } -}; - static struct hid_driver plantronics_driver = { .name = "plantronics", .id_table = 
plantronics_devices, - .usage_table = plantronics_usages, .input_mapping = plantronics_input_mapping, - .event = plantronics_event, .probe = plantronics_probe, }; module_hid_driver(plantronics_driver); diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c index e708152d3ea0..cba15edd47c2 100644 --- a/drivers/hid/hid-prodikeys.c +++ b/drivers/hid/hid-prodikeys.c @@ -803,18 +803,12 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report, static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; - struct usb_interface *intf; - unsigned short ifnum; + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber; unsigned long quirks = id->driver_data; struct pk_device *pk; struct pcmidi_snd *pm = NULL; - if (!hid_is_usb(hdev)) - return -EINVAL; - - intf = to_usb_interface(hdev->dev.parent); - ifnum = intf->cur_altsetting->desc.bInterfaceNumber; - pk = kzalloc(sizeof(*pk), GFP_KERNEL); if (pk == NULL) { hid_err(hdev, "can't alloc descriptor\n"); diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c index 58ab1d878a94..1948208fe038 100644 --- a/drivers/hid/hid-roccat-arvo.c +++ b/drivers/hid/hid-roccat-arvo.c @@ -349,9 +349,6 @@ static int arvo_probe(struct hid_device *hdev, { int retval; - if (!hid_is_usb(hdev)) - return -EINVAL; - retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c index 529dcc51a2be..bc62ed91e451 100644 --- a/drivers/hid/hid-roccat-isku.c +++ b/drivers/hid/hid-roccat-isku.c @@ -329,9 +329,6 @@ static int isku_probe(struct hid_device *hdev, { int retval; - if (!hid_is_usb(hdev)) - return -EINVAL; - retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c index 227504764c2f..6c2b821c8d8b 100644 --- 
a/drivers/hid/hid-roccat-kone.c +++ b/drivers/hid/hid-roccat-kone.c @@ -756,9 +756,6 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; - if (!hid_is_usb(hdev)) - return -EINVAL; - retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c index 3403cc528f4e..5e99fcdc71b9 100644 --- a/drivers/hid/hid-roccat-koneplus.c +++ b/drivers/hid/hid-roccat-koneplus.c @@ -438,9 +438,6 @@ static int koneplus_probe(struct hid_device *hdev, { int retval; - if (!hid_is_usb(hdev)) - return -EINVAL; - retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c index ef9508822e5f..07de2f9014c6 100644 --- a/drivers/hid/hid-roccat-konepure.c +++ b/drivers/hid/hid-roccat-konepure.c @@ -136,9 +136,6 @@ static int konepure_probe(struct hid_device *hdev, { int retval; - if (!hid_is_usb(hdev)) - return -EINVAL; - retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c index 549b15ef79b9..1073c0d1fae5 100644 --- a/drivers/hid/hid-roccat-kovaplus.c +++ b/drivers/hid/hid-roccat-kovaplus.c @@ -508,9 +508,6 @@ static int kovaplus_probe(struct hid_device *hdev, { int retval; - if (!hid_is_usb(hdev)) - return -EINVAL; - retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c index 6a8dc17cddf2..65e2e76bf2fe 100644 --- a/drivers/hid/hid-roccat-lua.c +++ b/drivers/hid/hid-roccat-lua.c @@ -163,9 +163,6 @@ static int lua_probe(struct hid_device *hdev, { int retval; - if (!hid_is_usb(hdev)) - return -EINVAL; - retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c index 
4fed4b899da2..47d7e74231e5 100644 --- a/drivers/hid/hid-roccat-pyra.c +++ b/drivers/hid/hid-roccat-pyra.c @@ -457,9 +457,6 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; - if (!hid_is_usb(hdev)) - return -EINVAL; - retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c index fda4a396a12e..47cc8f30ff6d 100644 --- a/drivers/hid/hid-roccat-ryos.c +++ b/drivers/hid/hid-roccat-ryos.c @@ -144,9 +144,6 @@ static int ryos_probe(struct hid_device *hdev, { int retval; - if (!hid_is_usb(hdev)) - return -EINVAL; - retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c index 0230fb54f08a..6dbf6e04dce7 100644 --- a/drivers/hid/hid-roccat-savu.c +++ b/drivers/hid/hid-roccat-savu.c @@ -116,9 +116,6 @@ static int savu_probe(struct hid_device *hdev, { int retval; - if (!hid_is_usb(hdev)) - return -EINVAL; - retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c index 89bb2260367f..7cbb067d4a9e 100644 --- a/drivers/hid/hid-samsung.c +++ b/drivers/hid/hid-samsung.c @@ -157,9 +157,6 @@ static int samsung_probe(struct hid_device *hdev, int ret; unsigned int cmask = HID_CONNECT_DEFAULT; - if (!hid_is_usb(hdev)) - return -EINVAL; - ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index ce4e9b34af98..83e45d5801a9 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -222,21 +222,16 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, buffer_size = buffer_size / sizeof(__s32); if (buffer_size) { for (i = 0; i < buffer_size; ++i) { - ret = hid_set_field(report->field[field_index], i, - (__force __s32)cpu_to_le32(*buf32)); - if (ret) - 
goto done_proc; - + hid_set_field(report->field[field_index], i, + (__force __s32)cpu_to_le32(*buf32)); ++buf32; } } if (remaining_bytes) { value = 0; memcpy(&value, (u8 *)buf32, remaining_bytes); - ret = hid_set_field(report->field[field_index], i, - (__force __s32)cpu_to_le32(value)); - if (ret) - goto done_proc; + hid_set_field(report->field[field_index], i, + (__force __s32)cpu_to_le32(value)); } hid_hw_request(hsdev->hdev, report, HID_REQ_SET_REPORT); hid_hw_wait(hsdev->hdev); diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index a4a6c90c8134..44e1eefc5b24 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -768,12 +768,8 @@ static int steam_probe(struct hid_device *hdev, if (steam->quirks & STEAM_QUIRK_WIRELESS) { hid_info(hdev, "Steam wireless receiver connected"); - /* If using a wireless adaptor ask for connection status */ - steam->connected = false; steam_request_conn_status(steam); } else { - /* A wired connection is always present */ - steam->connected = true; ret = steam_register(steam); if (ret) { hid_err(hdev, diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c index 4cf0aaad5619..85ac43517e3f 100644 --- a/drivers/hid/hid-uclogic.c +++ b/drivers/hid/hid-uclogic.c @@ -795,9 +795,6 @@ static int uclogic_tablet_enable(struct hid_device *hdev) __u8 *p; s32 v; - if (!hid_is_usb(hdev)) - return -EINVAL; - /* * Read string descriptor containing tablet parameters. 
The specific * string descriptor and data were discovered by sniffing the Windows diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index bd087e849090..4248d253c32a 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -789,7 +789,7 @@ static int i2c_hid_power(struct hid_device *hid, int lvl) return 0; } -struct hid_ll_driver i2c_hid_ll_driver = { +static struct hid_ll_driver i2c_hid_ll_driver = { .parse = i2c_hid_parse, .start = i2c_hid_start, .stop = i2c_hid_stop, @@ -799,7 +799,6 @@ struct hid_ll_driver i2c_hid_ll_driver = { .output_report = i2c_hid_output_report, .raw_request = i2c_hid_raw_request, }; -EXPORT_SYMBOL_GPL(i2c_hid_ll_driver); static int i2c_hid_init_irq(struct i2c_client *client) { diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index 83611f38ec13..672f26481f2d 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -35,22 +35,11 @@ static DEFINE_MUTEX(uhid_open_mutex); struct uhid_device { struct mutex devlock; - - /* This flag tracks whether the HID device is usable for commands from - * userspace. The flag is already set before hid_add_device(), which - * runs in workqueue context, to allow hid_add_device() to communicate - * with userspace. - * However, if hid_add_device() fails, the flag is cleared without - * holding devlock. - * We guarantee that if @running changes from true to false while you're - * holding @devlock, it's still fine to access @hid. - */ bool running; __u8 *rd_data; uint rd_size; - /* When this is NULL, userspace may use UHID_CREATE/UHID_CREATE2. 
*/ struct hid_device *hid; struct uhid_event input_buf; @@ -81,18 +70,9 @@ static void uhid_device_add_worker(struct work_struct *work) if (ret) { hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret); - /* We used to call hid_destroy_device() here, but that's really - * messy to get right because we have to coordinate with - * concurrent writes from userspace that might be in the middle - * of using uhid->hid. - * Just leave uhid->hid as-is for now, and clean it up when - * userspace tries to close or reinitialize the uhid instance. - * - * However, we do have to clear the ->running flag and do a - * wakeup to make sure userspace knows that the device is gone. - */ + hid_destroy_device(uhid->hid); + uhid->hid = NULL; uhid->running = false; - wake_up_interruptible(&uhid->report_wait); } } @@ -405,7 +385,7 @@ static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf, return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT); } -struct hid_ll_driver uhid_hid_driver = { +static struct hid_ll_driver uhid_hid_driver = { .start = uhid_hid_start, .stop = uhid_hid_stop, .open = uhid_hid_open, @@ -414,7 +394,6 @@ struct hid_ll_driver uhid_hid_driver = { .raw_request = uhid_hid_raw_request, .output_report = uhid_hid_output_report, }; -EXPORT_SYMBOL_GPL(uhid_hid_driver); #ifdef CONFIG_COMPAT @@ -512,7 +491,7 @@ static int uhid_dev_create2(struct uhid_device *uhid, void *rd_data; int ret; - if (uhid->hid) + if (uhid->running) return -EALREADY; rd_size = ev->u.create2.rd_size; @@ -593,7 +572,7 @@ static int uhid_dev_create(struct uhid_device *uhid, static int uhid_dev_destroy(struct uhid_device *uhid) { - if (!uhid->hid) + if (!uhid->running) return -EINVAL; uhid->running = false; @@ -602,7 +581,6 @@ static int uhid_dev_destroy(struct uhid_device *uhid) cancel_work_sync(&uhid->worker); hid_destroy_device(uhid->hid); - uhid->hid = NULL; kfree(uhid->rd_data); return 0; diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 
90f6aa9d5eb3..b0eeb5090c91 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -372,7 +372,7 @@ static int hid_submit_ctrl(struct hid_device *hid) raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report; dir = usbhid->ctrl[usbhid->ctrltail].dir; - len = hid_report_len(report); + len = ((report->size - 1) >> 3) + 1 + (report->id > 0); if (dir == USB_DIR_OUT) { usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0); usbhid->urbctrl->transfer_buffer_length = len; @@ -500,7 +500,7 @@ static void hid_ctrl(struct urb *urb) if (unplug) { usbhid->ctrltail = usbhid->ctrlhead; - } else if (usbhid->ctrlhead != usbhid->ctrltail) { + } else { usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1); if (usbhid->ctrlhead != usbhid->ctrltail && @@ -1185,20 +1185,9 @@ static void usbhid_stop(struct hid_device *hid) usbhid->intf->needs_remote_wakeup = 0; clear_bit(HID_STARTED, &usbhid->iofl); - spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ set_bit(HID_DISCONNECTED, &usbhid->iofl); - while (usbhid->ctrltail != usbhid->ctrlhead) { - if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_OUT) { - kfree(usbhid->ctrl[usbhid->ctrltail].raw_report); - usbhid->ctrl[usbhid->ctrltail].raw_report = NULL; - } - - usbhid->ctrltail = (usbhid->ctrltail + 1) & - (HID_CONTROL_FIFO_SIZE - 1); - } spin_unlock_irq(&usbhid->lock); - usb_kill_urb(usbhid->urbin); usb_kill_urb(usbhid->urbout); usb_kill_urb(usbhid->urbctrl); @@ -1272,7 +1261,7 @@ static int usbhid_idle(struct hid_device *hid, int report, int idle, return hid_set_idle(dev, ifnum, report, idle); } -struct hid_ll_driver usb_hid_driver = { +static struct hid_ll_driver usb_hid_driver = { .parse = usbhid_parse, .start = usbhid_start, .stop = usbhid_stop, @@ -1285,7 +1274,6 @@ struct hid_ll_driver usb_hid_driver = { .output_report = usbhid_output_report, .idle = usbhid_idle, }; -EXPORT_SYMBOL_GPL(usb_hid_driver); static int usbhid_probe(struct usb_interface *intf, const struct 
usb_device_id *id) { diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c index bc75f1efa0f4..08174d341f4a 100644 --- a/drivers/hid/usbhid/hid-pidff.c +++ b/drivers/hid/usbhid/hid-pidff.c @@ -1304,7 +1304,6 @@ int hid_pidff_init(struct hid_device *hid) if (pidff->pool[PID_DEVICE_MANAGED_POOL].value && pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) { - error = -EPERM; hid_notice(hid, "device does not support device managed pool\n"); goto fail; diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 00afff48aaec..e06af5b9f59e 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -458,7 +458,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev, * Skip the query for this type and modify defaults based on * interface number. */ - if (features->type == WIRELESS && intf) { + if (features->type == WIRELESS) { if (intf->cur_altsetting->desc.bInterfaceNumber == 0) features->device_type = WACOM_DEVICETYPE_WL_MONITOR; else @@ -1512,9 +1512,6 @@ static void wacom_wireless_work(struct work_struct *work) wacom_destroy_battery(wacom); - if (!usbdev) - return; - /* Stylus interface */ hdev1 = usb_get_intfdata(usbdev->config->interface[1]); wacom1 = hid_get_drvdata(hdev1); @@ -1692,6 +1689,8 @@ static void wacom_update_name(struct wacom *wacom) static int wacom_probe(struct hid_device *hdev, const struct hid_device_id *id) { + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct usb_device *dev = interface_to_usbdev(intf); struct wacom *wacom; struct wacom_wac *wacom_wac; struct wacom_features *features; @@ -1734,14 +1733,8 @@ static int wacom_probe(struct hid_device *hdev, goto fail_type; } - if (hid_is_usb(hdev)) { - struct usb_interface *intf = to_usb_interface(hdev->dev.parent); - struct usb_device *dev = interface_to_usbdev(intf); - - wacom->usbdev = dev; - wacom->intf = intf; - } - + wacom->usbdev = dev; + wacom->intf = intf; mutex_init(&wacom->lock); INIT_WORK(&wacom->work, 
wacom_wireless_work); diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c index e1080f005a19..df380d55c58f 100644 --- a/drivers/hsi/hsi.c +++ b/drivers/hsi/hsi.c @@ -115,7 +115,6 @@ struct hsi_client *hsi_new_client(struct hsi_port *port, if (device_register(&cl->device) < 0) { pr_err("hsi: failed to register client: %s\n", info->name); put_device(&cl->device); - goto err; } return cl; @@ -224,6 +223,8 @@ static void hsi_add_client_from_dt(struct hsi_port *port, if (err) goto err; + dev_set_name(&cl->device, "%s", name); + err = hsi_of_property_parse_mode(client, "hsi-mode", &mode); if (err) { err = hsi_of_property_parse_mode(client, "hsi-rx-mode", @@ -306,7 +307,6 @@ static void hsi_add_client_from_dt(struct hsi_port *port, cl->device.release = hsi_client_release; cl->device.of_node = client; - dev_set_name(&cl->device, "%s", name); if (device_register(&cl->device) < 0) { pr_err("hsi: failed to register client: %s\n", name); put_device(&cl->device); diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 4e7592addfe2..15e06493c53a 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -26,7 +26,6 @@ #define _HYPERV_VMBUS_H #include -#include #include #include #include diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index d818d42aa3ab..a9356a3dea92 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -271,7 +271,7 @@ static int i8k_get_fan_nominal_speed(int fan, int speed) } /* - * Set the fan speed (off, low, high, ...). + * Set the fan speed (off, low, high). Returns the new fan status. */ static int i8k_set_fan(int fan, int speed) { @@ -280,7 +280,7 @@ static int i8k_set_fan(int fan, int speed) speed = (speed < 0) ? 0 : ((speed > i8k_fan_max) ? i8k_fan_max : speed); regs.ebx = (fan & 0xff) | (speed << 8); - return i8k_smm(®s); + return i8k_smm(®s) ? 
: i8k_get_fan_status(fan); } static int i8k_get_temp_type(int sensor) @@ -394,7 +394,7 @@ static int i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg) { int val = 0; - int speed, err; + int speed; unsigned char buff[16]; int __user *argp = (int __user *)arg; @@ -451,11 +451,7 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg) if (copy_from_user(&speed, argp + 1, sizeof(int))) return -EFAULT; - err = i8k_set_fan(val, speed); - if (err < 0) - return err; - - val = i8k_get_fan_status(val); + val = i8k_set_fan(val, speed); break; default: @@ -555,18 +551,15 @@ static const struct file_operations i8k_fops = { .unlocked_ioctl = i8k_ioctl, }; -static struct proc_dir_entry *entry; - static void __init i8k_init_procfs(void) { /* Register the proc entry */ - entry = proc_create("i8k", 0, NULL, &i8k_fops); + proc_create("i8k", 0, NULL, &i8k_fops); } static void __exit i8k_exit_procfs(void) { - if (entry) - remove_proc_entry("i8k", NULL); + remove_proc_entry("i8k", NULL); } #else diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c index ee6d499edc1b..be60bd5bab78 100644 --- a/drivers/hwmon/lm80.c +++ b/drivers/hwmon/lm80.c @@ -630,6 +630,7 @@ static int lm80_probe(struct i2c_client *client, struct device *dev = &client->dev; struct device *hwmon_dev; struct lm80_data *data; + int rv; data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL); if (!data) @@ -642,8 +643,14 @@ static int lm80_probe(struct i2c_client *client, lm80_init_client(client); /* A few vars need to be filled upon startup */ - data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1)); - data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2)); + rv = lm80_read_value(client, LM80_REG_FAN_MIN(1)); + if (rv < 0) + return rv; + data->fan[f_min][0] = rv; + rv = lm80_read_value(client, LM80_REG_FAN_MIN(2)); + if (rv < 0) + return rv; + data->fan[f_min][1] = rv; hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, data, 
lm80_groups); diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 6f6f173aca6f..c9ff08dbe10c 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -265,7 +265,7 @@ static const struct lm90_params lm90_params[] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_BROKEN_ALERT, .alert_alarms = 0x7c, - .max_convrate = 7, + .max_convrate = 8, }, [lm86] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT, @@ -1209,11 +1209,12 @@ static int lm90_detect(struct i2c_client *client, if (man_id < 0 || chip_id < 0 || config1 < 0 || convrate < 0) return -ENODEV; - if (man_id == 0x01 || man_id == 0x5C || man_id == 0xA1) { + if (man_id == 0x01 || man_id == 0x5C || man_id == 0x41) { config2 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG2); if (config2 < 0) return -ENODEV; - } + } else + config2 = 0; /* Make compiler happy */ if ((address == 0x4C || address == 0x4D) && man_id == 0x01) { /* National Semiconductor */ diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c index 5081fd7e8fdd..a3d912cd3b8d 100644 --- a/drivers/hwmon/pmbus/lm25066.c +++ b/drivers/hwmon/pmbus/lm25066.c @@ -69,27 +69,22 @@ static struct __coeff lm25066_coeff[5][PSC_NUM_CLASSES + 2] = { [lm25056] = { [PSC_VOLTAGE_IN] = { .m = 16296, - .b = 1343, .R = -2, }, [PSC_CURRENT_IN] = { .m = 13797, - .b = -1833, .R = -2, }, [PSC_CURRENT_IN_L] = { .m = 6726, - .b = -537, .R = -2, }, [PSC_POWER] = { .m = 5501, - .b = -2908, .R = -3, }, [PSC_POWER_L] = { .m = 26882, - .b = -5646, .R = -4, }, [PSC_TEMPERATURE] = { @@ -101,32 +96,26 @@ static struct __coeff lm25066_coeff[5][PSC_NUM_CLASSES + 2] = { [lm25066] = { [PSC_VOLTAGE_IN] = { .m = 22070, - .b = -1800, .R = -2, }, [PSC_VOLTAGE_OUT] = { .m = 22070, - .b = -1800, .R = -2, }, [PSC_CURRENT_IN] = { .m = 13661, - .b = -5200, .R = -2, }, [PSC_CURRENT_IN_L] = { .m = 6852, - .b = -3100, .R = -2, }, [PSC_POWER] = { .m = 736, - .b = -3300, .R = -2, }, [PSC_POWER_L] = { .m = 369, - .b = -1900, .R = -2, }, 
[PSC_TEMPERATURE] = { @@ -166,32 +155,26 @@ static struct __coeff lm25066_coeff[5][PSC_NUM_CLASSES + 2] = { [lm5064] = { [PSC_VOLTAGE_IN] = { .m = 4611, - .b = -642, .R = -2, }, [PSC_VOLTAGE_OUT] = { .m = 4621, - .b = 423, .R = -2, }, [PSC_CURRENT_IN] = { .m = 10742, - .b = 1552, .R = -2, }, [PSC_CURRENT_IN_L] = { .m = 5456, - .b = 2118, .R = -2, }, [PSC_POWER] = { .m = 1204, - .b = 8524, .R = -3, }, [PSC_POWER_L] = { .m = 612, - .b = 11202, .R = -3, }, [PSC_TEMPERATURE] = { @@ -201,32 +184,26 @@ static struct __coeff lm25066_coeff[5][PSC_NUM_CLASSES + 2] = { [lm5066] = { [PSC_VOLTAGE_IN] = { .m = 4587, - .b = -1200, .R = -2, }, [PSC_VOLTAGE_OUT] = { .m = 4587, - .b = -2400, .R = -2, }, [PSC_CURRENT_IN] = { .m = 10753, - .b = -1200, .R = -2, }, [PSC_CURRENT_IN_L] = { .m = 5405, - .b = -600, .R = -2, }, [PSC_POWER] = { .m = 1204, - .b = -6000, .R = -3, }, [PSC_POWER_L] = { .m = 605, - .b = -8000, .R = -3, }, [PSC_TEMPERATURE] = { diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c index e585b29ce738..189eb6269971 100644 --- a/drivers/hwtracing/intel_th/gth.c +++ b/drivers/hwtracing/intel_th/gth.c @@ -485,7 +485,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev, output->active = false; for_each_set_bit(master, gth->output[output->port].master, - TH_CONFIGURABLE_MASTERS + 1) { + TH_CONFIGURABLE_MASTERS) { gth_master_set(gth, master, -1); } spin_unlock(>h->gth_lock); @@ -597,7 +597,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev, othdev->output.port = -1; othdev->output.active = false; gth->output[port].output = NULL; - for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++) + for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++) if (gth->master[master] == port) gth->master[master] = -1; spin_unlock(>h->gth_lock); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 6de7c2ca8b11..151e9c039957 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig 
@@ -778,7 +778,7 @@ config I2C_PXA_SLAVE config I2C_QUP tristate "Qualcomm QUP based I2C controller" - depends on ARCH_QCOM || COMPILE_TEST + depends on ARCH_QCOM help If you say yes to this option, support will be included for the built-in I2C interface on the Qualcomm SoCs. diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index d0a8308ea8ed..3032b89ac60b 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -28,11 +28,6 @@ #define BCM2835_I2C_FIFO 0x10 #define BCM2835_I2C_DIV 0x14 #define BCM2835_I2C_DEL 0x18 -/* - * 16-bit field for the number of SCL cycles to wait after rising SCL - * before deciding the slave is not responding. 0 disables the - * timeout detection. - */ #define BCM2835_I2C_CLKT 0x1c #define BCM2835_I2C_C_READ BIT(0) @@ -299,12 +294,6 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; - /* - * Disable the hardware clock stretching timeout. SMBUS - * specifies a limit for how long the device can stretch the - * clock, but core I2C doesn't. 
- */ - bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_CLKT, 0); bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0); ret = i2c_add_adapter(adap); diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 6e9007adad84..81115abf3c1f 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -304,7 +304,7 @@ static int brcmstb_send_i2c_cmd(struct brcmstb_i2c_dev *dev, goto cmd_out; } - if ((cmd == CMD_RD || cmd == CMD_WR) && + if ((CMD_RD || CMD_WR) && bsc_readl(dev, iic_enable) & BSC_IIC_EN_NOACK_MASK) { rc = -EREMOTEIO; dev_dbg(dev->device, "controller received NOACK intr for %s\n", diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 7d15c9143d16..84deed6571bd 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -894,10 +894,7 @@ static int cdns_i2c_probe(struct platform_device *pdev) if (IS_ERR(id->membase)) return PTR_ERR(id->membase); - ret = platform_get_irq(pdev, 0); - if (ret < 0) - return ret; - id->irq = ret; + id->irq = platform_get_irq(pdev, 0); id->adap.owner = THIS_MODULE; id->adap.dev.of_node = pdev->dev.of_node; diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 683188374121..1543d35d228d 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -53,10 +53,10 @@ enum dw_pci_ctl_id_t { }; struct dw_scl_sda_cfg { - u16 ss_hcnt; - u16 fs_hcnt; - u16 ss_lcnt; - u16 fs_lcnt; + u32 ss_hcnt; + u32 fs_hcnt; + u32 ss_lcnt; + u32 fs_lcnt; u32 sda_hold; }; diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c index 9ad031ea3300..56dc69e7349f 100644 --- a/drivers/i2c/busses/i2c-highlander.c +++ b/drivers/i2c/busses/i2c-highlander.c @@ -382,7 +382,7 @@ static int highlander_i2c_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); dev->irq = platform_get_irq(pdev, 0); - if (dev->irq < 0 || 
iic_force_poll) + if (iic_force_poll) dev->irq = 0; if (dev->irq) { diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 73026c00220c..f78069cd8d53 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -327,9 +327,11 @@ static int i801_check_post(struct i801_priv *priv, int status) dev_err(&priv->pci_dev->dev, "Transaction timeout\n"); /* try to stop the current command */ dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n"); - outb_p(SMBHSTCNT_KILL, SMBHSTCNT(priv)); + outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL, + SMBHSTCNT(priv)); usleep_range(1000, 2000); - outb_p(0, SMBHSTCNT(priv)); + outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL), + SMBHSTCNT(priv)); /* Check if it worked */ status = inb_p(SMBHSTSTS(priv)); @@ -669,11 +671,6 @@ static int i801_block_transaction(struct i801_priv *priv, int result = 0; unsigned char hostc; - if (read_write == I2C_SMBUS_READ && command == I2C_SMBUS_BLOCK_DATA) - data->block[0] = I2C_SMBUS_BLOCK_MAX; - else if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) - return -EPROTO; - if (command == I2C_SMBUS_I2C_BLOCK_DATA) { if (read_write == I2C_SMBUS_WRITE) { /* set I2C_EN bit in configuration register */ @@ -687,6 +684,16 @@ static int i801_block_transaction(struct i801_priv *priv, } } + if (read_write == I2C_SMBUS_WRITE + || command == I2C_SMBUS_I2C_BLOCK_DATA) { + if (data->block[0] < 1) + data->block[0] = 1; + if (data->block[0] > I2C_SMBUS_BLOCK_MAX) + data->block[0] = I2C_SMBUS_BLOCK_MAX; + } else { + data->block[0] = 32; /* max for SMBus block reads */ + } + /* Experience has shown that the block buffer can only be used for SMBus (not I2C) block transactions, even though the datasheet doesn't mention this limitation. 
*/ diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 117f367636b8..37303a7a2e73 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -420,19 +420,6 @@ static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx) /** Functions for IMX I2C adapter driver *************************************** *******************************************************************************/ -static void i2c_imx_clear_irq(struct imx_i2c_struct *i2c_imx, unsigned int bits) -{ - unsigned int temp; - - /* - * i2sr_clr_opcode is the value to clear all interrupts. Here we want to - * clear only , so we write ~i2sr_clr_opcode with just - * toggled. This is required because i.MX needs W0C and Vybrid uses W1C. - */ - temp = ~i2c_imx->hwdata->i2sr_clr_opcode ^ bits; - imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); -} - static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) { unsigned long orig_jiffies = jiffies; @@ -445,7 +432,8 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) /* check for arbitration lost */ if (temp & I2SR_IAL) { - i2c_imx_clear_irq(i2c_imx, I2SR_IAL); + temp &= ~I2SR_IAL; + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); return -EAGAIN; } @@ -472,16 +460,6 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx) dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__); return -ETIMEDOUT; } - - /* check for arbitration lost */ - if (i2c_imx->i2csr & I2SR_IAL) { - dev_dbg(&i2c_imx->adapter.dev, "<%s> Arbitration lost\n", __func__); - i2c_imx_clear_irq(i2c_imx, I2SR_IAL); - - i2c_imx->i2csr = 0; - return -EAGAIN; - } - dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__); i2c_imx->i2csr = 0; return 0; @@ -617,7 +595,9 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id) if (temp & I2SR_IIF) { /* save status register */ i2c_imx->i2csr = temp; - i2c_imx_clear_irq(i2c_imx, I2SR_IIF); + temp &= ~I2SR_IIF; + temp |= (i2c_imx->hwdata->i2sr_clr_opcode & 
I2SR_IIF); + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR); wake_up(&i2c_imx->queue); return IRQ_HANDLED; } diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c index 6b9031ccd767..72d6161cf77c 100644 --- a/drivers/i2c/busses/i2c-iop3xx.c +++ b/drivers/i2c/busses/i2c-iop3xx.c @@ -459,14 +459,16 @@ iop3xx_i2c_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - ret = irq; + ret = -ENXIO; goto unmap; } ret = request_irq(irq, iop3xx_i2c_irq_handler, 0, pdev->name, adapter_data); - if (ret) + if (ret) { + ret = -EIO; goto unmap; + } memcpy(new_adapter->name, pdev->name, strlen(pdev->name)); new_adapter->owner = THIS_MODULE; diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index d80cee068bea..ba3b94505c14 100644 --- a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -754,10 +754,7 @@ static int jz4780_i2c_probe(struct platform_device *pdev) jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0); - ret = platform_get_irq(pdev, 0); - if (ret < 0) - goto err; - i2c->irq = ret; + i2c->irq = platform_get_irq(pdev, 0); ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0, dev_name(&pdev->dev), i2c); if (ret) diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 988ea9df6654..48ecffecc0ed 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -23,7 +23,6 @@ #include #include -#include #include #include #include @@ -50,7 +49,6 @@ #define CCR_MTX 0x10 #define CCR_TXAK 0x08 #define CCR_RSTA 0x04 -#define CCR_RSVD 0x02 #define CSR_MCF 0x80 #define CSR_MAAS 0x40 @@ -72,7 +70,6 @@ struct mpc_i2c { u8 fdr, dfsrr; #endif struct clk *clk_per; - bool has_errata_A004447; }; struct mpc_i2c_divider { @@ -107,30 +104,23 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id) /* Sometimes 9th clock pulse isn't generated, and slave doesn't release * the bus, because it wants to send ACK. 
* Following sequence of enabling/disabling and sending start/stop generates - * the 9 pulses, each with a START then ending with STOP, so it's all OK. + * the 9 pulses, so it's all OK. */ static void mpc_i2c_fixup(struct mpc_i2c *i2c) { int k; - unsigned long flags; + u32 delay_val = 1000000 / i2c->real_clk + 1; + + if (delay_val < 2) + delay_val = 2; for (k = 9; k; k--) { writeccr(i2c, 0); - writeb(0, i2c->base + MPC_I2C_SR); /* clear any status bits */ - writeccr(i2c, CCR_MEN | CCR_MSTA); /* START */ - readb(i2c->base + MPC_I2C_DR); /* init xfer */ - udelay(15); /* let it hit the bus */ - local_irq_save(flags); /* should not be delayed further */ - writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSTA); /* delay SDA */ + writeccr(i2c, CCR_MSTA | CCR_MTX | CCR_MEN); readb(i2c->base + MPC_I2C_DR); - if (k != 1) - udelay(5); - local_irq_restore(flags); + writeccr(i2c, CCR_MEN); + udelay(delay_val << 1); } - writeccr(i2c, CCR_MEN); /* Initiate STOP */ - readb(i2c->base + MPC_I2C_DR); - udelay(15); /* Let STOP propagate */ - writeccr(i2c, 0); } static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing) @@ -188,75 +178,6 @@ static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing) return 0; } -static int i2c_mpc_wait_sr(struct mpc_i2c *i2c, int mask) -{ - void __iomem *addr = i2c->base + MPC_I2C_SR; - u8 val; - - return readb_poll_timeout(addr, val, val & mask, 0, 100); -} - -/* - * Workaround for Erratum A004447. From the P2040CE Rev Q - * - * 1. Set up the frequency divider and sampling rate. - * 2. I2CCR - a0h - * 3. Poll for I2CSR[MBB] to get set. - * 4. If I2CSR[MAL] is set (an indication that SDA is stuck low), then go to - * step 5. If MAL is not set, then go to step 13. - * 5. I2CCR - 00h - * 6. I2CCR - 22h - * 7. I2CCR - a2h - * 8. Poll for I2CSR[MBB] to get set. - * 9. Issue read to I2CDR. - * 10. Poll for I2CSR[MIF] to be set. - * 11. I2CCR - 82h - * 12. Workaround complete. Skip the next steps. - * 13. Issue read to I2CDR. - * 14. 
Poll for I2CSR[MIF] to be set. - * 15. I2CCR - 80h - */ -static void mpc_i2c_fixup_A004447(struct mpc_i2c *i2c) -{ - int ret; - u32 val; - - writeccr(i2c, CCR_MEN | CCR_MSTA); - ret = i2c_mpc_wait_sr(i2c, CSR_MBB); - if (ret) { - dev_err(i2c->dev, "timeout waiting for CSR_MBB\n"); - return; - } - - val = readb(i2c->base + MPC_I2C_SR); - - if (val & CSR_MAL) { - writeccr(i2c, 0x00); - writeccr(i2c, CCR_MSTA | CCR_RSVD); - writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSVD); - ret = i2c_mpc_wait_sr(i2c, CSR_MBB); - if (ret) { - dev_err(i2c->dev, "timeout waiting for CSR_MBB\n"); - return; - } - val = readb(i2c->base + MPC_I2C_DR); - ret = i2c_mpc_wait_sr(i2c, CSR_MIF); - if (ret) { - dev_err(i2c->dev, "timeout waiting for CSR_MIF\n"); - return; - } - writeccr(i2c, CCR_MEN | CCR_RSVD); - } else { - val = readb(i2c->base + MPC_I2C_DR); - ret = i2c_mpc_wait_sr(i2c, CSR_MIF); - if (ret) { - dev_err(i2c->dev, "timeout waiting for CSR_MIF\n"); - return; - } - writeccr(i2c, CCR_MEN); - } -} - #if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x) static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = { {20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23}, @@ -660,7 +581,7 @@ static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) { writeb(status & ~CSR_MAL, i2c->base + MPC_I2C_SR); - i2c_recover_bus(&i2c->adap); + mpc_i2c_fixup(i2c); } return -EIO; } @@ -696,7 +617,7 @@ static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) { writeb(status & ~CSR_MAL, i2c->base + MPC_I2C_SR); - i2c_recover_bus(&i2c->adap); + mpc_i2c_fixup(i2c); } return -EIO; } @@ -711,18 +632,6 @@ static u32 mpc_functionality(struct i2c_adapter *adap) | I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL; } -static int fsl_i2c_bus_recovery(struct i2c_adapter *adap) -{ - struct mpc_i2c *i2c = i2c_get_adapdata(adap); - - if (i2c->has_errata_A004447) 
- mpc_i2c_fixup_A004447(i2c); - else - mpc_i2c_fixup(i2c); - - return 0; -} - static const struct i2c_algorithm mpc_algo = { .master_xfer = mpc_xfer, .functionality = mpc_functionality, @@ -734,10 +643,6 @@ static struct i2c_adapter mpc_ops = { .timeout = HZ, }; -static struct i2c_bus_recovery_info fsl_i2c_recovery_info = { - .recover_bus = fsl_i2c_bus_recovery, -}; - static const struct of_device_id mpc_i2c_of_match[]; static int fsl_i2c_probe(struct platform_device *op) { @@ -822,8 +727,6 @@ static int fsl_i2c_probe(struct platform_device *op) dev_info(i2c->dev, "timeout %u us\n", mpc_ops.timeout * 1000000 / HZ); platform_set_drvdata(op, i2c); - if (of_property_read_bool(op->dev.of_node, "fsl,i2c-erratum-a004447")) - i2c->has_errata_A004447 = true; i2c->adap = mpc_ops; of_address_to_resource(op->dev.of_node, 0, &res); @@ -832,7 +735,6 @@ static int fsl_i2c_probe(struct platform_device *op) i2c_set_adapdata(&i2c->adap, i2c); i2c->adap.dev.parent = &op->dev; i2c->adap.dev.of_node = of_node_get(op->dev.of_node); - i2c->adap.bus_recovery_info = &fsl_i2c_recovery_info; result = i2c_add_adapter(&i2c->adap); if (result < 0) { diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 42a998fa5f79..9b867169142f 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -639,7 +639,7 @@ static int mtk_i2c_probe(struct platform_device *pdev) return PTR_ERR(i2c->pdmabase); irq = platform_get_irq(pdev, 0); - if (irq < 0) + if (irq <= 0) return irq; init_completion(&i2c->msg_complete); diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 587f1a5a1024..9096d17beb5b 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -325,8 +325,8 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd) if (!(ipd & REG_INT_MBRF)) return; - /* ack interrupt (read also produces a spurious START flag, clear it too) */ - i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, 
REG_IPD); + /* ack interrupt */ + i2c_writel(i2c, REG_INT_MBRF, REG_IPD); /* Can only handle a maximum of 32 bytes at a time */ if (len > 32) diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c index 09f27060feca..ced9c6a308d1 100644 --- a/drivers/i2c/busses/i2c-robotfuzz-osif.c +++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c @@ -89,7 +89,7 @@ static int osif_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, } } - ret = osif_usb_write(adapter, OSIFI2C_STOP, 0, 0, NULL, 0); + ret = osif_usb_read(adapter, OSIFI2C_STOP, 0, 0, NULL, 0); if (ret) { dev_err(&adapter->dev, "failure sending STOP\n"); return -EREMOTEIO; @@ -159,7 +159,7 @@ static int osif_probe(struct usb_interface *interface, * Set bus frequency. The frequency is: * 120,000,000 / ( 16 + 2 * div * 4^prescale). * Using dev = 52, prescale = 0 give 100KHz */ - ret = osif_usb_write(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0, + ret = osif_usb_read(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0, NULL, 0); if (ret) { dev_err(&interface->dev, "failure sending bit rate"); diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 44af640496bb..5df819610d52 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -499,10 +499,8 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat) /* cannot do this, the controller * forces us to send a new START * when we change direction */ - dev_dbg(i2c->dev, - "missing START before write->read\n"); + s3c24xx_i2c_stop(i2c, -EINVAL); - break; } goto retry_write; @@ -1213,7 +1211,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) if (!(i2c->quirks & QUIRK_POLL)) { i2c->irq = ret = platform_get_irq(pdev, 0); - if (ret < 0) { + if (ret <= 0) { dev_err(&pdev->dev, "cannot find IRQ\n"); clk_unprepare(i2c->clk); return ret; diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c index c836c53caa3f..24968384b401 100644 --- 
a/drivers/i2c/busses/i2c-sh7760.c +++ b/drivers/i2c/busses/i2c-sh7760.c @@ -471,10 +471,7 @@ static int sh7760_i2c_probe(struct platform_device *pdev) goto out2; } - ret = platform_get_irq(pdev, 0); - if (ret < 0) - goto out3; - id->irq = ret; + id->irq = platform_get_irq(pdev, 0); id->adap.nr = pdev->id; id->adap.algo = &sh7760_i2c_algo; diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index d0340b134e72..7584f292e2fd 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -148,7 +148,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count, if (count > 8192) count = 8192; - tmp = kzalloc(count, GFP_KERNEL); + tmp = kmalloc(count, GFP_KERNEL); if (tmp == NULL) return -ENOMEM; @@ -157,8 +157,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count, ret = i2c_master_recv(client, tmp, count); if (ret >= 0) - if (copy_to_user(buf, tmp, ret)) - ret = -EFAULT; + ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret; kfree(tmp); return ret; } diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index a6cc32a1e644..08a21d635d0d 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -704,7 +704,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq) struct request_queue *q = drive->queue; int write = rq_data_dir(rq) == WRITE; unsigned short sectors_per_frame = - queue_logical_block_size(q) >> SECTOR_SHIFT; + queue_logical_block_size(q) >> SECTOR_BITS; ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, " "secs_per_frame: %u", @@ -900,7 +900,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, * end up being bogus. 
*/ blocklen = be32_to_cpu(capbuf.blocklen); - blocklen = (blocklen >> SECTOR_SHIFT) << SECTOR_SHIFT; + blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS; switch (blocklen) { case 512: case 1024: @@ -916,7 +916,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, } *capacity = 1 + be32_to_cpu(capbuf.lba); - *sectors_per_frame = blocklen >> SECTOR_SHIFT; + *sectors_per_frame = blocklen >> SECTOR_BITS; ide_debug_log(IDE_DBG_PROBE, "cap: %lu, sectors_per_frame: %lu", *capacity, *sectors_per_frame); @@ -993,7 +993,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) drive->probed_capacity = toc->capacity * sectors_per_frame; blk_queue_logical_block_size(drive->queue, - sectors_per_frame << SECTOR_SHIFT); + sectors_per_frame << SECTOR_BITS); /* first read just the header, so we know how long the TOC is */ stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr, diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h index 7c6d017e84e9..1efc936f5b66 100644 --- a/drivers/ide/ide-cd.h +++ b/drivers/ide/ide-cd.h @@ -20,7 +20,11 @@ /************************************************************************/ -#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_SHIFT) +#define SECTOR_BITS 9 +#ifndef SECTOR_SIZE +#define SECTOR_SIZE (1 << SECTOR_BITS) +#endif +#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_BITS) #define SECTOR_BUFFER_SIZE (CD_FRAMESIZE * 32) /* Capabilities Page size including 8 bytes of Mode Page Header */ diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c index 057c9df500d3..f04b88406995 100644 --- a/drivers/iio/accel/bma180.c +++ b/drivers/iio/accel/bma180.c @@ -49,7 +49,7 @@ struct bma180_part_info { u8 int_reset_reg, int_reset_mask; u8 sleep_reg, sleep_mask; - u8 bw_reg, bw_mask, bw_offset; + u8 bw_reg, bw_mask; u8 scale_reg, scale_mask; u8 power_reg, power_mask, lowpower_val; u8 int_enable_reg, int_enable_mask; @@ -105,7 +105,6 @@ struct bma180_part_info { #define BMA250_RANGE_MASK 
GENMASK(3, 0) /* Range of accel values */ #define BMA250_BW_MASK GENMASK(4, 0) /* Accel bandwidth */ -#define BMA250_BW_OFFSET 8 #define BMA250_SUSPEND_MASK BIT(7) /* chip will sleep */ #define BMA250_LOWPOWER_MASK BIT(6) #define BMA250_DATA_INTEN_MASK BIT(4) @@ -121,11 +120,7 @@ struct bma180_data { int scale; int bw; bool pmode; - /* Ensure timestamp is naturally aligned */ - struct { - s16 chan[4]; - s64 timestamp __aligned(8); - } scan; + u8 buff[16]; /* 3x 16-bit + 8-bit + padding + timestamp */ }; enum bma180_chan { @@ -243,8 +238,7 @@ static int bma180_set_bw(struct bma180_data *data, int val) for (i = 0; i < data->part_info->num_bw; ++i) { if (data->part_info->bw_table[i] == val) { ret = bma180_set_bits(data, data->part_info->bw_reg, - data->part_info->bw_mask, - i + data->part_info->bw_offset); + data->part_info->bw_mask, i); if (ret) { dev_err(&data->client->dev, "failed to set bandwidth\n"); @@ -626,53 +620,32 @@ static const struct iio_chan_spec bma250_channels[] = { static const struct bma180_part_info bma180_part_info[] = { [BMA180] = { - .channels = bma180_channels, - .num_channels = ARRAY_SIZE(bma180_channels), - .scale_table = bma180_scale_table, - .num_scales = ARRAY_SIZE(bma180_scale_table), - .bw_table = bma180_bw_table, - .num_bw = ARRAY_SIZE(bma180_bw_table), - .int_reset_reg = BMA180_CTRL_REG0, - .int_reset_mask = BMA180_RESET_INT, - .sleep_reg = BMA180_CTRL_REG0, - .sleep_mask = BMA180_SLEEP, - .bw_reg = BMA180_BW_TCS, - .bw_mask = BMA180_BW, - .scale_reg = BMA180_OFFSET_LSB1, - .scale_mask = BMA180_RANGE, - .power_reg = BMA180_TCO_Z, - .power_mask = BMA180_MODE_CONFIG, - .lowpower_val = BMA180_LOW_POWER, - .int_enable_reg = BMA180_CTRL_REG3, - .int_enable_mask = BMA180_NEW_DATA_INT, - .softreset_reg = BMA180_RESET, - .chip_config = bma180_chip_config, - .chip_disable = bma180_chip_disable, + bma180_channels, ARRAY_SIZE(bma180_channels), + bma180_scale_table, ARRAY_SIZE(bma180_scale_table), + bma180_bw_table, ARRAY_SIZE(bma180_bw_table), + 
BMA180_CTRL_REG0, BMA180_RESET_INT, + BMA180_CTRL_REG0, BMA180_SLEEP, + BMA180_BW_TCS, BMA180_BW, + BMA180_OFFSET_LSB1, BMA180_RANGE, + BMA180_TCO_Z, BMA180_MODE_CONFIG, BMA180_LOW_POWER, + BMA180_CTRL_REG3, BMA180_NEW_DATA_INT, + BMA180_RESET, + bma180_chip_config, + bma180_chip_disable, }, [BMA250] = { - .channels = bma250_channels, - .num_channels = ARRAY_SIZE(bma250_channels), - .scale_table = bma250_scale_table, - .num_scales = ARRAY_SIZE(bma250_scale_table), - .bw_table = bma250_bw_table, - .num_bw = ARRAY_SIZE(bma250_bw_table), - .int_reset_reg = BMA250_INT_RESET_REG, - .int_reset_mask = BMA250_INT_RESET_MASK, - .sleep_reg = BMA250_POWER_REG, - .sleep_mask = BMA250_SUSPEND_MASK, - .bw_reg = BMA250_BW_REG, - .bw_mask = BMA250_BW_MASK, - .bw_offset = BMA250_BW_OFFSET, - .scale_reg = BMA250_RANGE_REG, - .scale_mask = BMA250_RANGE_MASK, - .power_reg = BMA250_POWER_REG, - .power_mask = BMA250_LOWPOWER_MASK, - .lowpower_val = 1, - .int_enable_reg = BMA250_INT_ENABLE_REG, - .int_enable_mask = BMA250_DATA_INTEN_MASK, - .softreset_reg = BMA250_RESET_REG, - .chip_config = bma250_chip_config, - .chip_disable = bma250_chip_disable, + bma250_channels, ARRAY_SIZE(bma250_channels), + bma250_scale_table, ARRAY_SIZE(bma250_scale_table), + bma250_bw_table, ARRAY_SIZE(bma250_bw_table), + BMA250_INT_RESET_REG, BMA250_INT_RESET_MASK, + BMA250_POWER_REG, BMA250_SUSPEND_MASK, + BMA250_BW_REG, BMA250_BW_MASK, + BMA250_RANGE_REG, BMA250_RANGE_MASK, + BMA250_POWER_REG, BMA250_LOWPOWER_MASK, 1, + BMA250_INT_ENABLE_REG, BMA250_DATA_INTEN_MASK, + BMA250_RESET_REG, + bma250_chip_config, + bma250_chip_disable, }, }; @@ -693,12 +666,12 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p) mutex_unlock(&data->mutex); goto err; } - data->scan.chan[i++] = ret; + ((s16 *)data->buff)[i++] = ret; } mutex_unlock(&data->mutex); - iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, time_ns); + iio_push_to_buffers_with_timestamp(indio_dev, data->buff, time_ns); err: 
iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index d139073ea48f..0667b2875ee4 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1284,7 +1284,8 @@ static int kxcjk1013_probe(struct i2c_client *client, err_iio_unregister: iio_device_unregister(indio_dev); err_buffer_cleanup: - iio_triggered_buffer_cleanup(indio_dev); + if (data->dready_trig) + iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: if (data->dready_trig) iio_trigger_unregister(data->dready_trig); @@ -1307,8 +1308,8 @@ static int kxcjk1013_remove(struct i2c_client *client) iio_device_unregister(indio_dev); - iio_triggered_buffer_cleanup(indio_dev); if (data->dready_trig) { + iio_triggered_buffer_cleanup(indio_dev); iio_trigger_unregister(data->dready_trig); iio_trigger_unregister(data->motion_trig); } diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index cc9b9344e208..d44c1b3a131c 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1011,7 +1011,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev) if (ret) return ret; - indio_dev->trig = iio_trigger_get(trig); + indio_dev->trig = trig; return 0; } diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c index 945c80183f35..85fe7f7247c1 100644 --- a/drivers/iio/accel/stk8312.c +++ b/drivers/iio/accel/stk8312.c @@ -107,11 +107,7 @@ struct stk8312_data { u8 mode; struct iio_trigger *dready_trig; bool dready_trigger_on; - /* Ensure timestamp is naturally aligned */ - struct { - s8 chans[3]; - s64 timestamp __aligned(8); - } scan; + s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 64-bit timestamp */ }; static IIO_CONST_ATTR(in_accel_scale_available, STK8312_SCALE_AVAIL); @@ -448,7 +444,7 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p) ret = i2c_smbus_read_i2c_block_data(data->client, STK8312_REG_XOUT, STK8312_ALL_CHANNEL_SIZE, - data->scan.chans); 
+ data->buffer); if (ret < STK8312_ALL_CHANNEL_SIZE) { dev_err(&data->client->dev, "register read failed\n"); mutex_unlock(&data->lock); @@ -462,12 +458,12 @@ static irqreturn_t stk8312_trigger_handler(int irq, void *p) mutex_unlock(&data->lock); goto err; } - data->scan.chans[i++] = ret; + data->buffer[i++] = ret; } } mutex_unlock(&data->lock); - iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, pf->timestamp); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c index b6e2d15024c8..5709d9eb8f34 100644 --- a/drivers/iio/accel/stk8ba50.c +++ b/drivers/iio/accel/stk8ba50.c @@ -95,11 +95,12 @@ struct stk8ba50_data { u8 sample_rate_idx; struct iio_trigger *dready_trig; bool dready_trigger_on; - /* Ensure timestamp is naturally aligned */ - struct { - s16 chans[3]; - s64 timetamp __aligned(8); - } scan; + /* + * 3 x 16-bit channels (10-bit data, 6-bit padding) + + * 1 x 16 padding + + * 4 x 16 64-bit timestamp + */ + s16 buffer[8]; }; #define STK8BA50_ACCEL_CHANNEL(index, reg, axis) { \ @@ -329,7 +330,7 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p) ret = i2c_smbus_read_i2c_block_data(data->client, STK8BA50_REG_XOUT, STK8BA50_ALL_CHANNEL_SIZE, - (u8 *)data->scan.chans); + (u8 *)data->buffer); if (ret < STK8BA50_ALL_CHANNEL_SIZE) { dev_err(&data->client->dev, "register read failed\n"); goto err; @@ -342,10 +343,10 @@ static irqreturn_t stk8ba50_trigger_handler(int irq, void *p) if (ret < 0) goto err; - data->scan.chans[i++] = ret; + data->buffer[i++] = ret; } } - iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, pf->timestamp); err: mutex_unlock(&data->lock); diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index 2e89937b5629..fe0c5a155e21 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c @@ -279,7 +279,6 @@ 
static int ad7793_setup(struct iio_dev *indio_dev, id &= AD7793_ID_MASK; if (id != st->chip_info->id) { - ret = -ENODEV; dev_err(&st->sd.spi->dev, "device ID query failed\n"); goto out; } diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c index 1d429134b1ef..d095efe1ba14 100644 --- a/drivers/iio/adc/men_z188_adc.c +++ b/drivers/iio/adc/men_z188_adc.c @@ -107,7 +107,6 @@ static int men_z188_probe(struct mcb_device *dev, struct z188_adc *adc; struct iio_dev *indio_dev; struct resource *mem; - int ret; indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc)); if (!indio_dev) @@ -134,14 +133,8 @@ static int men_z188_probe(struct mcb_device *dev, adc->mem = mem; mcb_set_drvdata(dev, indio_dev); - ret = iio_device_register(indio_dev); - if (ret) - goto err_unmap; - - return 0; + return iio_device_register(indio_dev); -err_unmap: - iounmap(adc->base); err: mcb_release_mem(mem); return -ENXIO; diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c index df852d395811..aa295e3327cf 100644 --- a/drivers/iio/adc/qcom-rradc.c +++ b/drivers/iio/adc/qcom-rradc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -198,8 +198,7 @@ #define FG_RR_ADC_STS_CHANNEL_READING_MASK 0x3 #define FG_RR_ADC_STS_CHANNEL_STS 0x2 -#define FG_RR_CONV_CONTINUOUS_TIME_MIN_MS 50 -#define FG_RR_CONV_CONT_CBK_TIME_MIN_MS 10 +#define FG_RR_CONV_CONTINUOUS_TIME_MIN_MS 50 #define FG_RR_CONV_MAX_RETRY_CNT 50 #define FG_RR_TP_REV_VERSION1 21 #define FG_RR_TP_REV_VERSION2 29 @@ -257,11 +256,6 @@ struct rradc_chip { struct pmic_revid_data *pmic_fab_id; int volt; struct power_supply *usb_trig; - struct power_supply *batt_psy; - struct power_supply *bms_psy; - struct notifier_block nb; - bool conv_cbk; - struct work_struct psy_notify_work; }; struct rradc_channels { @@ -706,28 +700,6 @@ static const struct rradc_channels rradc_chans[] = { FG_ADC_RR_AUX_THERM_STS) }; -static bool rradc_is_batt_psy_available(struct rradc_chip *chip) -{ - if (!chip->batt_psy) - chip->batt_psy = power_supply_get_by_name("battery"); - - if (!chip->batt_psy) - return false; - - return true; -} - -static bool rradc_is_bms_psy_available(struct rradc_chip *chip) -{ - if (!chip->bms_psy) - chip->bms_psy = power_supply_get_by_name("bms"); - - if (!chip->bms_psy) - return false; - - return true; -} - static int rradc_enable_continuous_mode(struct rradc_chip *chip) { int rc = 0; @@ -797,7 +769,6 @@ static int rradc_check_status_ready_with_retry(struct rradc_chip *chip, struct rradc_chan_prop *prop, u8 *buf, u16 status) { int rc = 0, retry_cnt = 0, mask = 0; - union power_supply_propval pval = {0, }; switch (prop->channel) { case RR_ADC_BATT_ID: @@ -824,11 +795,7 @@ static int rradc_check_status_ready_with_retry(struct rradc_chip *chip, break; } - if ((chip->conv_cbk) && (prop->channel == RR_ADC_USBIN_V)) - msleep(FG_RR_CONV_CONT_CBK_TIME_MIN_MS); - else - msleep(FG_RR_CONV_CONTINUOUS_TIME_MIN_MS); - + msleep(FG_RR_CONV_CONTINUOUS_TIME_MIN_MS); retry_cnt++; rc = rradc_read(chip, status, buf, 
1); if (rc < 0) { @@ -837,26 +804,8 @@ static int rradc_check_status_ready_with_retry(struct rradc_chip *chip, } } - if ((retry_cnt >= FG_RR_CONV_MAX_RETRY_CNT) && - ((prop->channel != RR_ADC_DCIN_V) || - (prop->channel != RR_ADC_DCIN_I))) { - pr_err("rradc is hung, Proceed to recovery\n"); - if (rradc_is_bms_psy_available(chip)) { - rc = power_supply_set_property(chip->bms_psy, - POWER_SUPPLY_PROP_FG_RESET_CLOCK, - &pval); - if (rc < 0) { - pr_err("Couldn't reset FG clock rc=%d\n", rc); - return rc; - } - } else { - pr_err("Error obtaining bms power supply\n"); - rc = -EINVAL; - } - } else { - if (retry_cnt >= FG_RR_CONV_MAX_RETRY_CNT) - rc = -ENODATA; - } + if (retry_cnt >= FG_RR_CONV_MAX_RETRY_CNT) + rc = -ENODATA; return rc; } @@ -1144,67 +1093,6 @@ static int rradc_read_raw(struct iio_dev *indio_dev, return rc; } -static void psy_notify_work(struct work_struct *work) -{ - struct rradc_chip *chip = container_of(work, - struct rradc_chip, psy_notify_work); - - struct rradc_chan_prop *prop; - union power_supply_propval pval = {0, }; - u16 adc_code; - int rc = 0; - - if (rradc_is_batt_psy_available(chip)) { - rc = power_supply_get_property(chip->batt_psy, - POWER_SUPPLY_PROP_STATUS, &pval); - if (rc < 0) - pr_err("Error obtaining battery status, rc=%d\n", rc); - - if (pval.intval == POWER_SUPPLY_STATUS_CHARGING) { - chip->conv_cbk = true; - prop = &chip->chan_props[RR_ADC_USBIN_V]; - rc = rradc_do_conversion(chip, prop, &adc_code); - if (rc == -ENODATA) { - pr_err("rradc is hung, Proceed to recovery\n"); - if (rradc_is_bms_psy_available(chip)) { - rc = power_supply_set_property - (chip->bms_psy, - POWER_SUPPLY_PROP_FG_RESET_CLOCK, - &pval); - if (rc < 0) - pr_err("Couldn't reset FG clock rc=%d\n", - rc); - prop = &chip->chan_props[RR_ADC_BATT_ID]; - rc = rradc_do_conversion(chip, prop, - &adc_code); - if (rc == -ENODATA) - pr_err("RRADC read failed after reset"); - } else { - pr_err("Error obtaining bms power supply"); - } - } - } - } else { - pr_err("Error 
obtaining battery power supply"); - } - chip->conv_cbk = false; - pm_relax(chip->dev); -} - -static int rradc_psy_notifier_cb(struct notifier_block *nb, - unsigned long event, void *data) -{ - struct power_supply *psy = data; - struct rradc_chip *chip = container_of(nb, struct rradc_chip, nb); - - if (strcmp(psy->desc->name, "battery") == 0) { - pm_stay_awake(chip->dev); - schedule_work(&chip->psy_notify_work); - } - - return NOTIFY_OK; -} - static const struct iio_info rradc_info = { .read_raw = &rradc_read_raw, .driver_module = THIS_MODULE, @@ -1349,20 +1237,6 @@ static int rradc_probe(struct platform_device *pdev) if (!chip->usb_trig) pr_debug("Error obtaining usb power supply\n"); - chip->batt_psy = power_supply_get_by_name("battery"); - if (!chip->batt_psy) - pr_debug("Error obtaining battery power supply\n"); - - chip->bms_psy = power_supply_get_by_name("bms"); - if (!chip->bms_psy) - pr_debug("Error obtaining bms power supply\n"); - - chip->nb.notifier_call = rradc_psy_notifier_cb; - rc = power_supply_reg_notifier(&chip->nb); - if (rc < 0) - pr_err("Error registering psy notifier rc = %d\n", rc); - INIT_WORK(&chip->psy_notify_work, psy_notify_work); - return devm_iio_device_register(dev, indio_dev); } diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c index da91e9e9ed8f..dffff64b5989 100644 --- a/drivers/iio/adc/rockchip_saradc.c +++ b/drivers/iio/adc/rockchip_saradc.c @@ -359,7 +359,7 @@ static int rockchip_saradc_resume(struct device *dev) ret = clk_prepare_enable(info->clk); if (ret) - clk_disable_unprepare(info->pclk); + return ret; return ret; } diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c index 27b8de41e34a..ff6f7f63c8d9 100644 --- a/drivers/iio/adc/ti-adc128s052.c +++ b/drivers/iio/adc/ti-adc128s052.c @@ -159,13 +159,7 @@ static int adc128_probe(struct spi_device *spi) mutex_init(&adc->lock); ret = iio_device_register(indio_dev); - if (ret) - goto err_disable_regulator; - return 0; - 
-err_disable_regulator: - regulator_disable(adc->reg); return ret; } diff --git a/drivers/iio/common/ssp_sensors/ssp_spi.c b/drivers/iio/common/ssp_sensors/ssp_spi.c index 645749b90ec0..704284a475ae 100644 --- a/drivers/iio/common/ssp_sensors/ssp_spi.c +++ b/drivers/iio/common/ssp_sensors/ssp_spi.c @@ -147,7 +147,7 @@ static int ssp_print_mcu_debug(char *data_frame, int *data_index, if (length > received_len - *data_index || length <= 0) { ssp_dbg("[SSP]: MSG From MCU-invalid debug length(%d/%d)\n", length, received_len); - return -EPROTO; + return length ? length : -EPROTO; } ssp_dbg("[SSP]: MSG From MCU - %s\n", &data_frame[*data_index]); @@ -286,8 +286,6 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len) for (idx = 0; idx < len;) { switch (dataframe[idx++]) { case SSP_MSG2AP_INST_BYPASS_DATA: - if (idx >= len) - return -EPROTO; sd = dataframe[idx++]; if (sd < 0 || sd >= SSP_SENSOR_MAX) { dev_err(SSP_DEV, @@ -297,13 +295,10 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len) if (indio_devs[sd]) { spd = iio_priv(indio_devs[sd]); - if (spd->process_data) { - if (idx >= len) - return -EPROTO; + if (spd->process_data) spd->process_data(indio_devs[sd], &dataframe[idx], data->timestamp); - } } else { dev_err(SSP_DEV, "no client for frame\n"); } @@ -311,8 +306,6 @@ static int ssp_parse_dataframe(struct ssp_data *data, char *dataframe, int len) idx += ssp_offset_map[sd]; break; case SSP_MSG2AP_INST_DEBUG_DATA: - if (idx >= len) - return -EPROTO; sd = ssp_print_mcu_debug(dataframe, &idx, len); if (sd) { dev_err(SSP_DEV, diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c index d3a3d62869d8..b555552a0d80 100644 --- a/drivers/iio/dac/ad5446.c +++ b/drivers/iio/dac/ad5446.c @@ -510,15 +510,8 @@ static int ad5622_write(struct ad5446_state *st, unsigned val) { struct i2c_client *client = to_i2c_client(st->dev); __be16 data = cpu_to_be16(val); - int ret; - - ret = i2c_master_send(client, (char *)&data, 
sizeof(data)); - if (ret < 0) - return ret; - if (ret != sizeof(data)) - return -EIO; - return 0; + return i2c_master_send(client, (char *)&data, sizeof(data)); } /** diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c index 0367641aed07..4e4c20d6d8b5 100644 --- a/drivers/iio/dac/ad5504.c +++ b/drivers/iio/dac/ad5504.c @@ -189,9 +189,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev, return ret; if (pwr_down) - st->pwr_down_mask &= ~(1 << chan->channel); - else st->pwr_down_mask |= (1 << chan->channel); + else + st->pwr_down_mask &= ~(1 << chan->channel); ret = ad5504_spi_write(st, AD5504_ADDR_CTRL, AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) | diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c index e5cefdb674f8..5489ec43b95d 100644 --- a/drivers/iio/dac/ad5624r_spi.c +++ b/drivers/iio/dac/ad5624r_spi.c @@ -231,7 +231,7 @@ static int ad5624r_probe(struct spi_device *spi) if (!indio_dev) return -ENOMEM; st = iio_priv(indio_dev); - st->reg = devm_regulator_get_optional(&spi->dev, "vref"); + st->reg = devm_regulator_get(&spi->dev, "vcc"); if (!IS_ERR(st->reg)) { ret = regulator_enable(st->reg); if (ret) @@ -242,22 +242,6 @@ static int ad5624r_probe(struct spi_device *spi) goto error_disable_reg; voltage_uv = ret; - } else { - if (PTR_ERR(st->reg) != -ENODEV) - return PTR_ERR(st->reg); - /* Backwards compatibility. 
This naming is not correct */ - st->reg = devm_regulator_get_optional(&spi->dev, "vcc"); - if (!IS_ERR(st->reg)) { - ret = regulator_enable(st->reg); - if (ret) - return ret; - - ret = regulator_get_voltage(st->reg); - if (ret < 0) - goto error_disable_reg; - - voltage_uv = ret; - } } spi_set_drvdata(spi, indio_dev); diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c index 7157b1a731a6..e04483254b28 100644 --- a/drivers/iio/gyro/itg3200_buffer.c +++ b/drivers/iio/gyro/itg3200_buffer.c @@ -64,9 +64,9 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p) iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp); -error_ret: iio_trigger_notify_done(indio_dev->trig); +error_ret: return IRQ_HANDLED; } diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c index c0eb9dfd1c45..90c24a23c679 100644 --- a/drivers/iio/imu/adis16400_buffer.c +++ b/drivers/iio/imu/adis16400_buffer.c @@ -37,11 +37,8 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev, return -ENOMEM; adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL); - if (!adis->buffer) { - kfree(adis->xfer); - adis->xfer = NULL; + if (!adis->buffer) return -ENOMEM; - } tx = adis->buffer + burst_length; tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD); diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c index 625f54d9e382..36607d52fee0 100644 --- a/drivers/iio/imu/adis_buffer.c +++ b/drivers/iio/imu/adis_buffer.c @@ -39,11 +39,8 @@ int adis_update_scan_mode(struct iio_dev *indio_dev, return -ENOMEM; adis->buffer = kzalloc(indio_dev->scan_bytes * 2, GFP_KERNEL); - if (!adis->buffer) { - kfree(adis->xfer); - adis->xfer = NULL; + if (!adis->buffer) return -ENOMEM; - } rx = adis->buffer; tx = rx + scan_count; @@ -83,6 +80,9 @@ static irqreturn_t adis_trigger_handler(int irq, void *p) struct adis *adis = iio_device_get_drvdata(indio_dev); int ret; + if (!adis->buffer) + return -ENOMEM; + if (adis->data->has_paging) 
{ mutex_lock(&adis->txrx_lock); if (adis->current_page != 0) { diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index d3cdd742972f..864a61b05665 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -1281,6 +1281,9 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev, indio_dev->masklength, in_ind + 1); while (in_ind != out_ind) { + in_ind = find_next_bit(indio_dev->active_scan_mask, + indio_dev->masklength, + in_ind + 1); ch = iio_find_channel_from_si(indio_dev, in_ind); if (ch->scan_type.repeat > 1) length = ch->scan_type.storagebits / 8 * @@ -1289,9 +1292,6 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev, length = ch->scan_type.storagebits / 8; /* Make sure we are aligned */ in_loc = roundup(in_loc, length) + length; - in_ind = find_next_bit(indio_dev->active_scan_mask, - indio_dev->masklength, - in_ind + 1); } ch = iio_find_channel_from_si(indio_dev, in_ind); if (ch->scan_type.repeat > 1) diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c index 63041dcec7af..45ca056f019e 100644 --- a/drivers/iio/light/hid-sensor-prox.c +++ b/drivers/iio/light/hid-sensor-prox.c @@ -37,9 +37,6 @@ struct prox_state { struct hid_sensor_common common_attributes; struct hid_sensor_hub_attribute_info prox_attr; u32 human_presence; - int scale_pre_decml; - int scale_post_decml; - int scale_precision; }; /* Channel definitions */ @@ -108,9 +105,8 @@ static int prox_read_raw(struct iio_dev *indio_dev, ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SCALE: - *val = prox_state->scale_pre_decml; - *val2 = prox_state->scale_post_decml; - ret_type = prox_state->scale_precision; + *val = prox_state->prox_attr.units; + ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_OFFSET: *val = hid_sensor_convert_exponent( @@ -244,12 +240,6 @@ static int prox_parse_report(struct platform_device *pdev, st->common_attributes.sensitivity.index, 
st->common_attributes.sensitivity.report_id); } - - st->scale_precision = hid_sensor_format_scale( - hsdev->usage, - &st->prox_attr, - &st->scale_pre_decml, &st->scale_post_decml); - return ret; } diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index 26bd0983fa06..9f5825f4fc0e 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -35,12 +35,9 @@ #define LTR501_PART_ID 0x86 #define LTR501_MANUFAC_ID 0x87 #define LTR501_ALS_DATA1 0x88 /* 16-bit, little endian */ -#define LTR501_ALS_DATA1_UPPER 0x89 /* upper 8 bits of LTR501_ALS_DATA1 */ #define LTR501_ALS_DATA0 0x8a /* 16-bit, little endian */ -#define LTR501_ALS_DATA0_UPPER 0x8b /* upper 8 bits of LTR501_ALS_DATA0 */ #define LTR501_ALS_PS_STATUS 0x8c #define LTR501_PS_DATA 0x8d /* 16-bit, little endian */ -#define LTR501_PS_DATA_UPPER 0x8e /* upper 8 bits of LTR501_PS_DATA */ #define LTR501_INTR 0x8f /* output mode, polarity, mode */ #define LTR501_PS_THRESH_UP 0x90 /* 11 bit, ps upper threshold */ #define LTR501_PS_THRESH_LOW 0x92 /* 11 bit, ps lower threshold */ @@ -411,19 +408,18 @@ static int ltr501_read_als(struct ltr501_data *data, __le16 buf[2]) static int ltr501_read_ps(struct ltr501_data *data) { - __le16 status; - int ret; + int ret, status; ret = ltr501_drdy(data, LTR501_STATUS_PS_RDY); if (ret < 0) return ret; ret = regmap_bulk_read(data->regmap, LTR501_PS_DATA, - &status, sizeof(status)); + &status, 2); if (ret < 0) return ret; - return le16_to_cpu(status); + return status; } static int ltr501_read_intr_prst(struct ltr501_data *data, @@ -1184,7 +1180,7 @@ static struct ltr501_chip_info ltr501_chip_info_tbl[] = { .als_gain_tbl_size = ARRAY_SIZE(ltr559_als_gain_tbl), .ps_gain = ltr559_ps_gain_tbl, .ps_gain_tbl_size = ARRAY_SIZE(ltr559_ps_gain_tbl), - .als_mode_active = BIT(0), + .als_mode_active = BIT(1), .als_gain_mask = BIT(2) | BIT(3) | BIT(4), .als_gain_shift = 2, .info = <r501_info, @@ -1248,7 +1244,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, 
void *p) ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1, (u8 *)als_buf, sizeof(als_buf)); if (ret < 0) - goto done; + return ret; if (test_bit(0, indio_dev->active_scan_mask)) scan.channels[j++] = le16_to_cpu(als_buf[1]); if (test_bit(1, indio_dev->active_scan_mask)) @@ -1332,12 +1328,9 @@ static bool ltr501_is_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { case LTR501_ALS_DATA1: - case LTR501_ALS_DATA1_UPPER: case LTR501_ALS_DATA0: - case LTR501_ALS_DATA0_UPPER: case LTR501_ALS_PS_STATUS: case LTR501_PS_DATA: - case LTR501_PS_DATA_UPPER: return true; default: return false; diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c index 8197af263b80..42d334ba612e 100644 --- a/drivers/iio/light/stk3310.c +++ b/drivers/iio/light/stk3310.c @@ -547,8 +547,9 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private) mutex_lock(&data->lock); ret = regmap_field_read(data->reg_flag_nf, &dir); if (ret < 0) { - dev_err(&data->client->dev, "register read failed: %d\n", ret); - goto out; + dev_err(&data->client->dev, "register read failed\n"); + mutex_unlock(&data->lock); + return ret; } event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1, IIO_EV_TYPE_THRESH, @@ -560,7 +561,6 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private) ret = regmap_field_write(data->reg_flag_psint, 0); if (ret < 0) dev_err(&data->client->dev, "failed to reset interrupts\n"); -out: mutex_unlock(&data->lock); return IRQ_HANDLED; diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c index 4900ad1ac51f..261d517428e4 100644 --- a/drivers/iio/magnetometer/mag3110.c +++ b/drivers/iio/magnetometer/mag3110.c @@ -52,12 +52,6 @@ struct mag3110_data { struct i2c_client *client; struct mutex lock; u8 ctrl_reg1; - /* Ensure natural alignment of timestamp */ - struct { - __be16 channels[3]; - u8 temperature; - s64 ts __aligned(8); - } scan; }; static int mag3110_request(struct mag3110_data *data) @@ -251,9 +245,10 @@ 
static irqreturn_t mag3110_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mag3110_data *data = iio_priv(indio_dev); + u8 buffer[16]; /* 3 16-bit channels + 1 byte temp + padding + ts */ int ret; - ret = mag3110_read(data, data->scan.channels); + ret = mag3110_read(data, (__be16 *) buffer); if (ret < 0) goto done; @@ -262,10 +257,10 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p) MAG3110_DIE_TEMP); if (ret < 0) goto done; - data->scan.temperature = ret; + buffer[6] = ret; } - iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + iio_push_to_buffers_with_timestamp(indio_dev, buffer, iio_get_time_ns()); done: diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c index 6ed4e6902eff..0f5b8767ec2e 100644 --- a/drivers/iio/pressure/mpl3115.c +++ b/drivers/iio/pressure/mpl3115.c @@ -139,14 +139,7 @@ static irqreturn_t mpl3115_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mpl3115_data *data = iio_priv(indio_dev); - /* - * 32-bit channel + 16-bit channel + padding + ts - * Note that it is possible for only one of the first 2 - * channels to be enabled. If that happens, the first element - * of the buffer may be either 16 or 32-bits. As such we cannot - * use a simple structure definition to express this data layout. 
- */ - u8 buffer[16] __aligned(8); + u8 buffer[16]; /* 32-bit channel + 16-bit channel + padding + ts */ int ret, pos = 0; mutex_lock(&data->lock); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index ba713bc27c5f..53c622c99ee4 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1252,7 +1252,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, id.local_id); if (IS_ERR(cm_id_priv->timewait_info)) { ret = PTR_ERR(cm_id_priv->timewait_info); - cm_id_priv->timewait_info = NULL; goto out; } @@ -1682,7 +1681,6 @@ static int cm_req_handler(struct cm_work *work) id.local_id); if (IS_ERR(cm_id_priv->timewait_info)) { ret = PTR_ERR(cm_id_priv->timewait_info); - cm_id_priv->timewait_info = NULL; goto destroy; } cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index b5e7bd23857e..b59a4a819aaa 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2227,8 +2227,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) work->new_state = RDMA_CM_ROUTE_RESOLVED; work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; - if (!route->path_rec) - route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); + route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); if (!route->path_rec) { ret = -ENOMEM; goto err1; diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index a14a3ec99ffe..179e8134d57f 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -848,8 +848,7 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid, for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) { ret = ib_query_gid(device, port, i, &tmp_gid, NULL); if (ret) - continue; - + return ret; if (!memcmp(&tmp_gid, gid, sizeof *gid)) { *port_num = port; if (index) diff --git a/drivers/infiniband/core/user_mad.c 
b/drivers/infiniband/core/user_mad.c index 27bc51409f55..e9e75f40714c 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -342,11 +342,6 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf, mutex_lock(&file->mutex); - if (file->agents_dead) { - mutex_unlock(&file->mutex); - return -EIO; - } - while (list_empty(&file->recv_list)) { mutex_unlock(&file->mutex); @@ -489,7 +484,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, agent = __get_agent(file, packet->mad.hdr.id); if (!agent) { - ret = -EIO; + ret = -EINVAL; goto err_up; } diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index f422a8a2528b..54fd4d81a3f1 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -3441,14 +3441,13 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id) ep->com.local_addr.ss_family == AF_INET) { err = cxgb4_remove_server_filter( ep->com.dev->rdev.lldi.ports[0], ep->stid, - ep->com.dev->rdev.lldi.rxq_ids[0], false); + ep->com.dev->rdev.lldi.rxq_ids[0], 0); } else { struct sockaddr_in6 *sin6; c4iw_init_wr_wait(&ep->com.wr_wait); err = cxgb4_remove_server( ep->com.dev->rdev.lldi.ports[0], ep->stid, - ep->com.dev->rdev.lldi.rxq_ids[0], - ep->com.local_addr.ss_family == AF_INET6); + ep->com.dev->rdev.lldi.rxq_ids[0], 0); if (err) goto done; err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 31a811968511..04206c600098 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -277,7 +277,6 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) { pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n", pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); - ret = -EINVAL; goto free_dma; } @@ -1896,11 +1895,10 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct 
ib_qp_attr *attr, memset(attr, 0, sizeof *attr); memset(init_attr, 0, sizeof *init_attr); attr->qp_state = to_ib_qp_state(qhp->attr.state); - attr->cur_qp_state = to_ib_qp_state(qhp->attr.state); init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; - init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges; + init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges; init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; return 0; diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index a15beb161b64..ecd461ee6dbe 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -766,10 +766,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) qp->flags |= MLX4_IB_QP_NETIF; - else { - err = -EINVAL; + else goto err; - } } err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index 1b0bb340281e..59e1f6ea2ede 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -612,7 +612,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev, entry->opcode = IB_WC_BIND_MW; break; default: - entry->opcode = 0xFF; + entry->opcode = MTHCA_OPCODE_INVALID; break; } } else { diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index e1fc67e73bf8..4393a022867b 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -105,6 +105,7 @@ enum { MTHCA_OPCODE_ATOMIC_CS = 0x11, MTHCA_OPCODE_ATOMIC_FA = 0x12, MTHCA_OPCODE_BIND_MW = 0x18, + MTHCA_OPCODE_INVALID = 0xff }; enum { diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c 
b/drivers/infiniband/hw/qib/qib_user_sdma.c index 2d0b992579d6..3e0677c51276 100644 --- a/drivers/infiniband/hw/qib/qib_user_sdma.c +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c @@ -41,7 +41,6 @@ #include #include #include -#include #include "qib.h" #include "qib_user_sdma.h" @@ -607,7 +606,7 @@ done: /* * How many pages in this iovec element? */ -static size_t qib_user_sdma_num_pages(const struct iovec *iov) +static int qib_user_sdma_num_pages(const struct iovec *iov) { const unsigned long addr = (unsigned long) iov->iov_base; const unsigned long len = iov->iov_len; @@ -663,7 +662,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev, static int qib_user_sdma_pin_pages(const struct qib_devdata *dd, struct qib_user_sdma_queue *pq, struct qib_user_sdma_pkt *pkt, - unsigned long addr, int tlen, size_t npages) + unsigned long addr, int tlen, int npages) { struct page *pages[8]; int i, j; @@ -727,7 +726,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd, unsigned long idx; for (idx = 0; idx < niov; idx++) { - const size_t npages = qib_user_sdma_num_pages(iov + idx); + const int npages = qib_user_sdma_num_pages(iov + idx); const unsigned long addr = (unsigned long) iov[idx].iov_base; ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr, @@ -829,8 +828,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd, unsigned pktnw; unsigned pktnwc; int nfrags = 0; - size_t npages = 0; - size_t bytes_togo = 0; + int npages = 0; + int bytes_togo = 0; int tiddma = 0; int cfur; @@ -890,11 +889,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd, npages += qib_user_sdma_num_pages(&iov[idx]); - if (check_add_overflow(bytes_togo, slen, &bytes_togo) || - bytes_togo > type_max(typeof(pkt->bytes_togo))) { - ret = -EINVAL; - goto free_pbc; - } + bytes_togo += slen; pktnwc += slen >> 2; idx++; nfrags++; @@ -913,10 +908,10 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd, } if (frag_size) { - size_t tidsmsize, n, 
pktsize, sz, addrlimit; + int pktsize, tidsmsize, n; n = npages*((2*PAGE_SIZE/frag_size)+1); - pktsize = struct_size(pkt, addr, n); + pktsize = sizeof(*pkt) + sizeof(pkt->addr[0])*n; /* * Determine if this is tid-sdma or just sdma. @@ -931,24 +926,14 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd, else tidsmsize = 0; - if (check_add_overflow(pktsize, tidsmsize, &sz)) { - ret = -EINVAL; - goto free_pbc; - } - pkt = kmalloc(sz, GFP_KERNEL); + pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL); if (!pkt) { ret = -ENOMEM; goto free_pbc; } pkt->largepkt = 1; pkt->frag_size = frag_size; - if (check_add_overflow(n, ARRAY_SIZE(pkt->addr), - &addrlimit) || - addrlimit > type_max(typeof(pkt->addrlimit))) { - ret = -EINVAL; - goto free_pkt; - } - pkt->addrlimit = addrlimit; + pkt->addrlimit = n + ARRAY_SIZE(pkt->addr); if (tiddma) { char *tidsm = (char *)pkt + pktsize; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 99d65f211b44..8e18bfca5516 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -180,7 +180,6 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, } usnic_uiom_free_dev_list(dev_list); - dev_list = NULL; } if (!found) { @@ -208,8 +207,6 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, spin_unlock(&vf->lock); if (IS_ERR_OR_NULL(qp_grp)) { usnic_err("Failed to allocate qp_grp\n"); - if (usnic_ib_share_vf) - usnic_uiom_free_dev_list(dev_list); return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM); } diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index e0274d57e516..3b4188efc283 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -3576,11 +3576,9 @@ static void srp_remove_one(struct ib_device *device, void *client_data) spin_unlock(&host->target_lock); /* - * srp_queue_remove_work() queues a call to - * srp_remove_target(). 
The latter function cancels - * target->tl_err_work so waiting for the remove works to - * finish is sufficient. + * Wait for tl_err and target port removal tasks. */ + flush_workqueue(system_long_wq); flush_workqueue(srp_remove_wq); kfree(host); diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c index 66a46c84e28f..8f2042432c85 100644 --- a/drivers/input/ff-core.c +++ b/drivers/input/ff-core.c @@ -237,15 +237,9 @@ int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file) EXPORT_SYMBOL_GPL(input_ff_erase); /* - * input_ff_flush - erase all effects owned by a file handle - * @dev: input device to erase effect from - * @file: purported owner of the effects - * - * This function erases all force-feedback effects associated with - * the given owner from specified device. Note that @file may be %NULL, - * in which case all effects will be erased. + * flush_effects - erase all effects owned by a file handle */ -int input_ff_flush(struct input_dev *dev, struct file *file) +static int flush_effects(struct input_dev *dev, struct file *file) { struct ff_device *ff = dev->ff; int i; @@ -261,7 +255,6 @@ int input_ff_flush(struct input_dev *dev, struct file *file) return 0; } -EXPORT_SYMBOL_GPL(input_ff_flush); /** * input_ff_event() - generic handler for force-feedback events @@ -350,7 +343,7 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects) mutex_init(&ff->mutex); dev->ff = ff; - dev->flush = input_ff_flush; + dev->flush = flush_effects; dev->event = input_ff_event; __set_bit(EV_FF, dev->evbit); diff --git a/drivers/input/input.c b/drivers/input/input.c index e90c9f0262e8..857917086cb0 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -2119,12 +2119,6 @@ int input_register_device(struct input_dev *dev) /* KEY_RESERVED is not supposed to be transmitted to userspace. */ __clear_bit(KEY_RESERVED, dev->keybit); - /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. 
*/ - if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) { - __clear_bit(BTN_RIGHT, dev->keybit); - __clear_bit(BTN_MIDDLE, dev->keybit); - } - /* Make sure that bitmasks not mentioned in dev->evbit are clean. */ input_cleanse_bitmasks(dev); diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 1b773ea66772..5d11fea3c8ec 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -448,7 +448,7 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev, if (IS_ERR(abspam)) return PTR_ERR(abspam); - for (i = 0; i < len && i < joydev->nabs; i++) { + for (i = 0; i < joydev->nabs; i++) { if (abspam[i] > ABS_MAX) { retval = -EINVAL; goto out; @@ -472,9 +472,6 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev, int i; int retval = 0; - if (len % sizeof(*keypam)) - return -EINVAL; - len = min(len, sizeof(joydev->keypam)); /* Validate the map. */ @@ -482,7 +479,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev, if (IS_ERR(keypam)) return PTR_ERR(keypam); - for (i = 0; i < (len / 2) && i < joydev->nkey; i++) { + for (i = 0; i < joydev->nkey; i++) { if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC) { retval = -EINVAL; goto out; @@ -492,7 +489,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev, memcpy(joydev->keypam, keypam, len); for (i = 0; i < joydev->nkey; i++) - joydev->keymap[joydev->keypam[i] - BTN_MISC] = i; + joydev->keymap[keypam[i] - BTN_MISC] = i; out: kfree(keypam); diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c index cfa1be4ad868..f4445a4e8d6a 100644 --- a/drivers/input/joystick/spaceball.c +++ b/drivers/input/joystick/spaceball.c @@ -35,7 +35,6 @@ #include #include #include -#include #define DRIVER_DESC "SpaceTec SpaceBall 2003/3003/4000 FLX driver" @@ -92,15 +91,9 @@ static void spaceball_process_packet(struct spaceball* spaceball) case 'D': /* Ball data */ if (spaceball->idx != 15) return; - /* - * Skip first three bytes; read six axes worth of data. 
- * Axis values are signed 16-bit big-endian. - */ - data += 3; - for (i = 0; i < ARRAY_SIZE(spaceball_axes); i++) { + for (i = 0; i < 6; i++) input_report_abs(dev, spaceball_axes[i], - (__s16)get_unaligned_be16(&data[i * 2])); - } + (__s16)((data[2 * i + 3] << 8) | data[2 * i + 2])); break; case 'K': /* Button data */ diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index f8f6bd92e314..54a6691d7d87 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -232,17 +232,9 @@ static const struct xpad_device { { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, - { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, - { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, - { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE }, - { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, - { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, - { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, - { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, - { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, - { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, @@ -266,7 +258,6 @@ static const struct xpad_device { { 0x1038, 0x1430, 
"SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, - { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, @@ -321,10 +312,6 @@ static const struct xpad_device { { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 }, { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, - { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, - { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE }, - { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, - { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, @@ -348,7 +335,6 @@ static const struct xpad_device { { 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 }, { 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, - { 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 }, { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX }, { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX }, { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN } @@ -449,7 +435,6 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */ XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ - 
XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ @@ -459,13 +444,8 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ - XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */ - XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ - XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */ - XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */ - XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */ { } }; diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c index 44a5a5496cfd..b01966dc7eb3 100644 --- a/drivers/input/keyboard/cros_ec_keyb.c +++ b/drivers/input/keyboard/cros_ec_keyb.c @@ -137,7 +137,6 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev, "changed: [r%d c%d]: byte %02x\n", row, col, new_state); - input_event(idev, EV_MSC, MSC_SCAN, pos); input_report_key(idev, keycodes[pos], new_state); } diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c index da07742fd9a4..5b152f25a8e1 100644 --- a/drivers/input/keyboard/hil_kbd.c +++ b/drivers/input/keyboard/hil_kbd.c @@ -512,7 +512,6 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv) HIL_IDD_NUM_AXES_PER_SET(*idd)) { printk(KERN_INFO PREFIX "combo devices are not supported.\n"); - error = -EINVAL; goto bail1; } diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c index bcec72367c1d..7abfd34eb87e 100644 --- a/drivers/input/keyboard/nspire-keypad.c +++ b/drivers/input/keyboard/nspire-keypad.c @@ 
-96,15 +96,9 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id) return IRQ_HANDLED; } -static int nspire_keypad_open(struct input_dev *input) +static int nspire_keypad_chip_init(struct nspire_keypad *keypad) { - struct nspire_keypad *keypad = input_get_drvdata(input); unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles; - int error; - - error = clk_prepare_enable(keypad->clk); - if (error) - return error; cycles_per_us = (clk_get_rate(keypad->clk) / 1000000); if (cycles_per_us == 0) @@ -130,17 +124,36 @@ static int nspire_keypad_open(struct input_dev *input) keypad->int_mask = 1 << 1; writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK); + /* Disable GPIO interrupts to prevent hanging on touchpad */ + /* Possibly used to detect touchpad events */ + writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT); + /* Acknowledge existing interrupts */ + writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS); + return 0; } -static void nspire_keypad_close(struct input_dev *input) +static int nspire_keypad_open(struct input_dev *input) { struct nspire_keypad *keypad = input_get_drvdata(input); + int error; - /* Disable interrupts */ - writel(0, keypad->reg_base + KEYPAD_INTMSK); - /* Acknowledge existing interrupts */ - writel(~0, keypad->reg_base + KEYPAD_INT); + error = clk_prepare_enable(keypad->clk); + if (error) + return error; + + error = nspire_keypad_chip_init(keypad); + if (error) { + clk_disable_unprepare(keypad->clk); + return error; + } + + return 0; +} + +static void nspire_keypad_close(struct input_dev *input) +{ + struct nspire_keypad *keypad = input_get_drvdata(input); clk_disable_unprepare(keypad->clk); } @@ -202,25 +215,6 @@ static int nspire_keypad_probe(struct platform_device *pdev) return -ENOMEM; } - error = clk_prepare_enable(keypad->clk); - if (error) { - dev_err(&pdev->dev, "failed to enable clock\n"); - return error; - } - - /* Disable interrupts */ - writel(0, keypad->reg_base + KEYPAD_INTMSK); - /* Acknowledge existing 
interrupts */ - writel(~0, keypad->reg_base + KEYPAD_INT); - - /* Disable GPIO interrupts to prevent hanging on touchpad */ - /* Possibly used to detect touchpad events */ - writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT); - /* Acknowledge existing GPIO interrupts */ - writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS); - - clk_disable_unprepare(keypad->clk); - input_set_drvdata(input, keypad); input->id.bustype = BUS_HOST; diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c index c8c6650b9140..50a7faa504f7 100644 --- a/drivers/input/misc/cm109.c +++ b/drivers/input/misc/cm109.c @@ -546,15 +546,12 @@ static int cm109_input_open(struct input_dev *idev) dev->ctl_data->byte[HID_OR2] = dev->keybit; dev->ctl_data->byte[HID_OR3] = 0x00; - dev->ctl_urb_pending = 1; error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL); - if (error) { - dev->ctl_urb_pending = 0; + if (error) dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n", __func__, error); - } else { + else dev->open = 1; - } mutex_unlock(&dev->pm_mutex); diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index c67a0113020c..7cffff22a1ca 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -230,18 +230,6 @@ static int uinput_dev_erase_effect(struct input_dev *dev, int effect_id) return uinput_request_submit(udev, &request); } -static int uinput_dev_flush(struct input_dev *dev, struct file *file) -{ - /* - * If we are called with file == NULL that means we are tearing - * down the device, and therefore we can not handle FF erase - * requests: either we are handling UI_DEV_DESTROY (and holding - * the udev->mutex), or the file descriptor is closed and there is - * nobody on the other side anymore. - */ - return file ? 
input_ff_flush(dev, file) : 0; -} - static void uinput_destroy_device(struct uinput_device *udev) { const char *name, *phys; @@ -285,12 +273,6 @@ static int uinput_create_device(struct uinput_device *udev) dev->ff->playback = uinput_dev_playback; dev->ff->set_gain = uinput_dev_set_gain; dev->ff->set_autocenter = uinput_dev_set_autocenter; - /* - * The standard input_ff_flush() implementation does - * not quite work for uinput as we can't reasonably - * handle FF requests during device teardown. - */ - dev->flush = uinput_dev_flush; } error = input_register_device(udev->dev); diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c index 11773838a34d..ef234c9b2f2f 100644 --- a/drivers/input/mouse/appletouch.c +++ b/drivers/input/mouse/appletouch.c @@ -929,8 +929,6 @@ static int atp_probe(struct usb_interface *iface, set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); set_bit(BTN_LEFT, input_dev->keybit); - INIT_WORK(&dev->work, atp_reinit); - error = input_register_device(dev->input); if (error) goto err_free_buffer; @@ -938,6 +936,8 @@ static int atp_probe(struct usb_interface *iface, /* save our data pointer in this interface device */ usb_set_intfdata(iface, dev); + INIT_WORK(&dev->work, atp_reinit); + return 0; err_free_buffer: diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c index c7ab84bc877f..e4eb048d1bf6 100644 --- a/drivers/input/mouse/cyapa_gen6.c +++ b/drivers/input/mouse/cyapa_gen6.c @@ -573,7 +573,7 @@ static int cyapa_pip_retrieve_data_structure(struct cyapa *cyapa, memset(&cmd, 0, sizeof(cmd)); put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd.head.addr); - put_unaligned_le16(sizeof(cmd) - 2, &cmd.head.length); + put_unaligned_le16(sizeof(cmd), &cmd.head.length - 2); cmd.head.report_id = PIP_APP_CMD_REPORT_ID; cmd.head.cmd_code = PIP_RETRIEVE_DATA_STRUCTURE; put_unaligned_le16(read_offset, &cmd.read_offset); diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 
44015e6bd6a0..16f5d5660053 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -132,21 +132,55 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count, return 0; } -static int elan_set_power(struct elan_tp_data *data, bool on) +static int elan_enable_power(struct elan_tp_data *data) { int repeat = ETP_RETRY_COUNT; int error; + error = regulator_enable(data->vcc); + if (error) { + dev_err(&data->client->dev, + "failed to enable regulator: %d\n", error); + return error; + } + do { - error = data->ops->power_control(data->client, on); + error = data->ops->power_control(data->client, true); if (error >= 0) return 0; msleep(30); } while (--repeat > 0); - dev_err(&data->client->dev, "failed to set power %s: %d\n", - on ? "on" : "off", error); + dev_err(&data->client->dev, "failed to enable power: %d\n", error); + return error; +} + +static int elan_disable_power(struct elan_tp_data *data) +{ + int repeat = ETP_RETRY_COUNT; + int error; + + do { + error = data->ops->power_control(data->client, false); + if (!error) { + error = regulator_disable(data->vcc); + if (error) { + dev_err(&data->client->dev, + "failed to disable regulator: %d\n", + error); + /* Attempt to power the chip back up */ + data->ops->power_control(data->client, true); + break; + } + + return 0; + } + + msleep(30); + } while (--repeat > 0); + + dev_err(&data->client->dev, "failed to disable power: %d\n", error); return error; } @@ -1161,19 +1195,9 @@ static int __maybe_unused elan_suspend(struct device *dev) /* Enable wake from IRQ */ data->irq_wake = (enable_irq_wake(client->irq) == 0); } else { - ret = elan_set_power(data, false); - if (ret) - goto err; - - ret = regulator_disable(data->vcc); - if (ret) { - dev_err(dev, "error %d disabling regulator\n", ret); - /* Attempt to power the chip back up */ - elan_set_power(data, true); - } + ret = elan_disable_power(data); } -err: mutex_unlock(&data->sysfs_mutex); return ret; } @@ -1184,18 +1208,12 @@ 
static int __maybe_unused elan_resume(struct device *dev) struct elan_tp_data *data = i2c_get_clientdata(client); int error; - if (!device_may_wakeup(dev)) { - error = regulator_enable(data->vcc); - if (error) { - dev_err(dev, "error %d enabling regulator\n", error); - goto err; - } - } else if (data->irq_wake) { + if (device_may_wakeup(dev) && data->irq_wake) { disable_irq_wake(client->irq); data->irq_wake = false; } - error = elan_set_power(data, true); + error = elan_enable_power(data); if (error) { dev_err(dev, "power up when resuming failed: %d\n", error); goto err; diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index d5307b927378..7b942ee364b6 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -435,19 +435,6 @@ static void elantech_report_trackpoint(struct psmouse *psmouse, case 0x16008020U: case 0x26800010U: case 0x36808000U: - - /* - * This firmware misreport coordinates for trackpoint - * occasionally. Discard packets outside of [-127, 127] range - * to prevent cursor jumps. 
- */ - if (packet[4] == 0x80 || packet[5] == 0x80 || - packet[1] >> 7 == packet[4] >> 7 || - packet[2] >> 7 == packet[5] >> 7) { - elantech_debug("discarding packet [%6ph]\n", packet); - break; - - } x = packet[4] - (int)((packet[1]^0x80) << 1); y = (int)((packet[2]^0x80) << 1) - packet[5]; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 6cd2ae95e21e..82ff44637ed7 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -224,12 +224,6 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), }, }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), - DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), - }, - }, { } }; @@ -276,13 +270,6 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"), }, }, - { - /* Fujitsu Lifebook T725 laptop */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"), - }, - }, { /* Fujitsu Lifebook U745 */ .matches = { @@ -672,12 +659,6 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"), }, }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */ - }, - }, { } }; static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { @@ -702,48 +683,6 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"), }, }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"), - }, - }, - { - .matches = { - 
DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"), - }, - }, { /* Advent 4211 */ .matches = { @@ -924,13 +863,6 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"), }, }, - { - /* Fujitsu Lifebook T725 laptop */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T725"), - }, - }, { /* Fujitsu U574 laptop */ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 37f84ba11f05..6b648339733f 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -1456,8 +1456,7 @@ static int __init i8042_setup_aux(void) if (error) goto err_free_ports; - error = aux_enable(); - if (error) + if (aux_enable()) goto err_free_irq; i8042_aux_irq_registered = true; diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 1d98198c4bdf..a61b2153ab8c 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -35,7 +35,6 @@ #include #include #include -#include /* * This code has been heavily tested on a Nokia 770, and lightly @@ -411,7 +410,7 @@ static int ads7845_read12_ser(struct device *dev, unsigned command) if (status == 0) { /* BE12 value, then padding */ - status = get_unaligned_be16(&req->sample[1]); + status = be16_to_cpu(*((u16 *)&req->sample[1])); status = status >> 3; status &= 0x0fff; } @@ -786,11 +785,10 @@ static void ads7846_report_state(struct ads7846 *ts) /* compute touch pressure resistance using equation #2 */ 
Rt = z2; Rt -= z1; - Rt *= ts->x_plate_ohms; - Rt = DIV_ROUND_CLOSEST(Rt, 16); Rt *= x; + Rt *= ts->x_plate_ohms; Rt /= z1; - Rt = DIV_ROUND_CLOSEST(Rt, 256); + Rt = (Rt + 2047) >> 12; } else { Rt = 0; } diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c index e2e31cbd6b2c..8051a4b704ea 100644 --- a/drivers/input/touchscreen/elo.c +++ b/drivers/input/touchscreen/elo.c @@ -345,10 +345,8 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv) switch (elo->id) { case 0: /* 10-byte protocol */ - if (elo_setup_10(elo)) { - err = -EIO; + if (elo_setup_10(elo)) goto fail3; - } break; diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index d7cc8f6a292e..67cadda13ab1 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -77,18 +77,6 @@ static const struct dmi_system_id rotated_screen[] = { DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"), }, }, - { - .ident = "Teclast X98 Pro", - .matches = { - /* - * Only match BIOS date, because the manufacturers - * BIOS does not report the board name at all - * (sometimes)... 
- */ - DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"), - DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"), - }, - }, { .ident = "WinBook TW100", .matches = { diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c index 490f3055aec3..bb6f2fe14667 100644 --- a/drivers/input/touchscreen/of_touchscreen.c +++ b/drivers/input/touchscreen/of_touchscreen.c @@ -75,8 +75,8 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch) data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-x", input_abs_get_max(input, axis) + 1, - &maximum); - data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x", + &maximum) | + touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x", input_abs_get_fuzz(input, axis), &fuzz); if (data_present) @@ -86,8 +86,8 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch) data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-y", input_abs_get_max(input, axis) + 1, - &maximum); - data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y", + &maximum) | + touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y", input_abs_get_fuzz(input, axis), &fuzz); if (data_present) @@ -97,11 +97,11 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch) data_present = touchscreen_get_prop_u32(dev, "touchscreen-max-pressure", input_abs_get_max(input, axis), - &maximum); - data_present |= touchscreen_get_prop_u32(dev, - "touchscreen-fuzz-pressure", - input_abs_get_fuzz(input, axis), - &fuzz); + &maximum) | + touchscreen_get_prop_u32(dev, + "touchscreen-fuzz-pressure", + input_abs_get_fuzz(input, axis), + &fuzz); if (data_present) touchscreen_set_params(input, axis, maximum, fuzz); } diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index c5d34a782372..499402a975b3 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -266,7 +266,7 @@ static int 
e2i_init(struct usbtouch_usb *usbtouch) int ret; struct usb_device *udev = interface_to_usbdev(usbtouch->interface); - ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01, 0x02, 0x0000, 0x0081, NULL, 0, USB_CTRL_SET_TIMEOUT); @@ -462,7 +462,7 @@ static int mtouch_init(struct usbtouch_usb *usbtouch) int ret, i; struct usb_device *udev = interface_to_usbdev(usbtouch->interface); - ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), MTOUCHUSB_RESET, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); @@ -474,7 +474,7 @@ static int mtouch_init(struct usbtouch_usb *usbtouch) msleep(150); for (i = 0; i < 3; i++) { - ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), MTOUCHUSB_ASYNC_REPORT, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 1, 1, NULL, 0, USB_CTRL_SET_TIMEOUT); @@ -645,7 +645,7 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch) } /* start sending data */ - ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), TSC10_CMD_DATA1, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index d916ac079662..66a406e87e11 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3625,7 +3625,7 @@ static struct irq_chip amd_ir_chip; #define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6) #define DTE_IRQ_REMAP_INTCTL (2ULL << 60) -#define DTE_IRQ_TABLE_LEN (9ULL << 1) +#define DTE_IRQ_TABLE_LEN (8ULL << 1) #define DTE_IRQ_REMAP_ENABLE 1ULL static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table) diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 54f27dd9f156..00169c9eb3ee 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -1012,8 +1012,8 @@ 
static int alloc_iommu(struct dmar_drhd_unit *drhd) { struct intel_iommu *iommu; u32 ver, sts; - int agaw = -1; - int msagaw = -1; + int agaw = 0; + int msagaw = 0; int err; if (!drhd->reg_base_addr) { @@ -1038,28 +1038,17 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) } err = -EINVAL; - if (cap_sagaw(iommu->cap) == 0) { - pr_info("%s: No supported address widths. Not attempting DMA translation.\n", - iommu->name); - drhd->ignored = 1; + agaw = iommu_calculate_agaw(iommu); + if (agaw < 0) { + pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", + iommu->seq_id); + goto err_unmap; } - - if (!drhd->ignored) { - agaw = iommu_calculate_agaw(iommu); - if (agaw < 0) { - pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n", - iommu->seq_id); - drhd->ignored = 1; - } - } - if (!drhd->ignored) { - msagaw = iommu_calculate_max_sagaw(iommu); - if (msagaw < 0) { - pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", - iommu->seq_id); - drhd->ignored = 1; - agaw = -1; - } + msagaw = iommu_calculate_max_sagaw(iommu); + if (msagaw < 0) { + pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n", + iommu->seq_id); + goto err_unmap; } iommu->agaw = agaw; iommu->msagaw = msagaw; @@ -1087,15 +1076,16 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) raw_spin_lock_init(&iommu->register_lock); drhd->iommu = iommu; - iommu->drhd = drhd; - if (intel_iommu_enabled && !drhd->ignored) + if (intel_iommu_enabled) iommu->iommu_dev = iommu_device_create(NULL, iommu, intel_iommu_groups, "%s", iommu->name); return 0; +err_unmap: + unmap_iommu(iommu); error_free_seq_id: dmar_free_seq_id(iommu); error: @@ -1105,8 +1095,7 @@ error: static void free_iommu(struct intel_iommu *iommu) { - if (intel_iommu_enabled && !iommu->drhd->ignored) - iommu_device_destroy(iommu->iommu_dev); + iommu_device_destroy(iommu->iommu_dev); if (iommu->irq) { if (iommu->pr_irq) { diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 
88ba1a65c283..ce125ec23d2a 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -1350,8 +1350,6 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain, irq_data = irq_domain_get_irq_data(domain, virq + i); irq_cfg = irqd_cfg(irq_data); if (!irq_data || !irq_cfg) { - if (!i) - kfree(data); ret = -EINVAL; goto out_free_data; } diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c index 4294523bede5..9b23843dcad4 100644 --- a/drivers/ipack/carriers/tpci200.c +++ b/drivers/ipack/carriers/tpci200.c @@ -94,13 +94,16 @@ static void tpci200_unregister(struct tpci200_board *tpci200) free_irq(tpci200->info->pdev->irq, (void *) tpci200); pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs); + pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR); pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR); pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR); pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR); + pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR); pci_disable_device(tpci200->info->pdev); + pci_dev_put(tpci200->info->pdev); } static void tpci200_enable_irq(struct tpci200_board *tpci200, @@ -521,7 +524,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL); if (!tpci200->info) { ret = -ENOMEM; - goto err_tpci200; + goto out_err_info; } pci_dev_get(pdev); @@ -532,7 +535,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, if (ret) { dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory"); ret = -EBUSY; - goto err_tpci200_info; + goto out_err_pci_request; } tpci200->info->cfg_regs = ioremap_nocache( pci_resource_start(pdev, TPCI200_CFG_MEM_BAR), @@ -540,7 +543,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, if (!tpci200->info->cfg_regs) { dev_err(&pdev->dev, "Failed to map 
PCI Configuration Memory"); ret = -EFAULT; - goto err_request_region; + goto out_err_ioremap; } /* Disable byte swapping for 16 bit IP module access. This will ensure @@ -563,7 +566,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, if (ret) { dev_err(&pdev->dev, "error during tpci200 install\n"); ret = -ENODEV; - goto err_cfg_regs; + goto out_err_install; } /* Register the carrier in the industry pack bus driver */ @@ -575,7 +578,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "error registering the carrier on ipack driver\n"); ret = -EFAULT; - goto err_tpci200_install; + goto out_err_bus_register; } /* save the bus number given by ipack to logging purpose */ @@ -586,16 +589,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev, tpci200_create_device(tpci200, i); return 0; -err_tpci200_install: +out_err_bus_register: tpci200_uninstall(tpci200); -err_cfg_regs: - pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); -err_request_region: +out_err_install: + iounmap(tpci200->info->cfg_regs); +out_err_ioremap: pci_release_region(pdev, TPCI200_CFG_MEM_BAR); -err_tpci200_info: - kfree(tpci200->info); +out_err_pci_request: pci_dev_put(pdev); -err_tpci200: + kfree(tpci200->info); +out_err_info: kfree(tpci200); return ret; } @@ -605,12 +608,6 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200) ipack_bus_unregister(tpci200->info->ipack_bus); tpci200_uninstall(tpci200); - pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); - - pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR); - - pci_dev_put(tpci200->info->pdev); - kfree(tpci200->info); kfree(tpci200); } diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c index c1175464064b..035d5449227e 100644 --- a/drivers/ipack/devices/ipoctal.c +++ b/drivers/ipack/devices/ipoctal.c @@ -38,7 +38,6 @@ struct ipoctal_channel { unsigned int pointer_read; unsigned int pointer_write; struct tty_port tty_port; - bool tty_registered; union 
scc2698_channel __iomem *regs; union scc2698_block __iomem *block_regs; unsigned int board_id; @@ -87,34 +86,22 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty) return 0; } -static int ipoctal_install(struct tty_driver *driver, struct tty_struct *tty) +static int ipoctal_open(struct tty_struct *tty, struct file *file) { struct ipoctal_channel *channel = dev_get_drvdata(tty->dev); struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index); - int res; - - if (!ipack_get_carrier(ipoctal->dev)) - return -EBUSY; - - res = tty_standard_install(driver, tty); - if (res) - goto err_put_carrier; + int err; tty->driver_data = channel; - return 0; - -err_put_carrier: - ipack_put_carrier(ipoctal->dev); - - return res; -} + if (!ipack_get_carrier(ipoctal->dev)) + return -EBUSY; -static int ipoctal_open(struct tty_struct *tty, struct file *file) -{ - struct ipoctal_channel *channel = tty->driver_data; + err = tty_port_open(&channel->tty_port, tty, file); + if (err) + ipack_put_carrier(ipoctal->dev); - return tty_port_open(&channel->tty_port, tty, file); + return err; } static void ipoctal_reset_stats(struct ipoctal_stats *stats) @@ -282,6 +269,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, int res; int i; struct tty_driver *tty; + char name[20]; struct ipoctal_channel *channel; struct ipack_region *region; void __iomem *addr; @@ -372,11 +360,8 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, /* Fill struct tty_driver with ipoctal data */ tty->owner = THIS_MODULE; tty->driver_name = KBUILD_MODNAME; - tty->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d.%d.", bus_nr, slot); - if (!tty->name) { - res = -ENOMEM; - goto err_put_driver; - } + sprintf(name, KBUILD_MODNAME ".%d.%d.", bus_nr, slot); + tty->name = name; tty->major = 0; tty->minor_start = 0; @@ -392,7 +377,8 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, res = tty_register_driver(tty); if (res) 
{ dev_err(&ipoctal->dev->dev, "Can't register tty driver.\n"); - goto err_free_name; + put_tty_driver(tty); + return res; } /* Save struct tty_driver for use it when uninstalling the device */ @@ -403,9 +389,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, channel = &ipoctal->channel[i]; tty_port_init(&channel->tty_port); - res = tty_port_alloc_xmit_buf(&channel->tty_port); - if (res) - continue; + tty_port_alloc_xmit_buf(&channel->tty_port); channel->tty_port.ops = &ipoctal_tty_port_ops; ipoctal_reset_stats(&channel->stats); @@ -413,15 +397,13 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, spin_lock_init(&channel->lock); channel->pointer_read = 0; channel->pointer_write = 0; - tty_dev = tty_port_register_device_attr(&channel->tty_port, tty, - i, NULL, channel, NULL); + tty_dev = tty_port_register_device(&channel->tty_port, tty, i, NULL); if (IS_ERR(tty_dev)) { dev_err(&ipoctal->dev->dev, "Failed to register tty device.\n"); - tty_port_free_xmit_buf(&channel->tty_port); tty_port_destroy(&channel->tty_port); continue; } - channel->tty_registered = true; + dev_set_drvdata(tty_dev, channel); } /* @@ -433,13 +415,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr, ipoctal_irq_handler, ipoctal); return 0; - -err_free_name: - kfree(tty->name); -err_put_driver: - put_tty_driver(tty); - - return res; } static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel, @@ -681,7 +656,6 @@ static void ipoctal_cleanup(struct tty_struct *tty) static const struct tty_operations ipoctal_fops = { .ioctl = NULL, - .install = ipoctal_install, .open = ipoctal_open, .close = ipoctal_close, .write = ipoctal_write_tty, @@ -724,17 +698,12 @@ static void __ipoctal_remove(struct ipoctal *ipoctal) for (i = 0; i < NR_CHANNELS; i++) { struct ipoctal_channel *channel = &ipoctal->channel[i]; - - if (!channel->tty_registered) - continue; - tty_unregister_device(ipoctal->tty_drv, i); 
tty_port_free_xmit_buf(&channel->tty_port); tty_port_destroy(&channel->tty_port); } tty_unregister_driver(ipoctal->tty_drv); - kfree(ipoctal->tty_drv->name); put_tty_driver(ipoctal->tty_drv); kfree(ipoctal); } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index fee7c6f3e971..d4ae43f71e72 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -356,7 +356,7 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, its_fixup_cmd(cmd); - return desc->its_invall_cmd.col; + return NULL; } static u64 its_cmd_ptr_to_offset(struct its_node *its, diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c index 9694529b709d..b1777104fd9f 100644 --- a/drivers/irqchip/irq-nvic.c +++ b/drivers/irqchip/irq-nvic.c @@ -29,7 +29,7 @@ #define NVIC_ISER 0x000 #define NVIC_ICER 0x080 -#define NVIC_IPR 0x400 +#define NVIC_IPR 0x300 #define NVIC_MAX_BANKS 16 /* diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c index cd7fdce98359..c71914e8f596 100644 --- a/drivers/irqchip/irq-s3c24xx.c +++ b/drivers/irqchip/irq-s3c24xx.c @@ -368,25 +368,11 @@ static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc, asmlinkage void __exception_irq_entry s3c24xx_handle_irq(struct pt_regs *regs) { do { - /* - * For platform based machines, neither ERR nor NULL can happen here. - * The s3c24xx_handle_irq() will be set as IRQ handler iff this succeeds: - * - * s3c_intc[0] = s3c24xx_init_intc() - * - * If this fails, the next calls to s3c24xx_init_intc() won't be executed. - * - * For DT machine, s3c_init_intc_of() could set the IRQ handler without - * setting s3c_intc[0] only if it was called with num_ctrl=0. There is no - * such code path, so again the s3c_intc[0] will have a valid pointer if - * set_handle_irq() is called. - * - * Therefore in s3c24xx_handle_irq(), the s3c_intc[0] is always something. 
- */ - if (s3c24xx_handle_intc(s3c_intc[0], regs, 0)) - continue; + if (likely(s3c_intc[0])) + if (s3c24xx_handle_intc(s3c_intc[0], regs, 0)) + continue; - if (!IS_ERR_OR_NULL(s3c_intc[2])) + if (s3c_intc[2]) if (s3c24xx_handle_intc(s3c_intc[2], regs, 64)) continue; diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 658e116d8fe6..d15347de415a 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -564,11 +564,6 @@ int detach_capi_ctr(struct capi_ctr *ctr) ctr_down(ctr, CAPI_CTR_DETACHED); - if (ctr->cnr < 1 || ctr->cnr - 1 >= CAPI_MAXCONTR) { - err = -EINVAL; - goto unlock_out; - } - if (capi_controller[ctr->cnr - 1] != ctr) { err = -EINVAL; goto unlock_out; @@ -850,7 +845,7 @@ EXPORT_SYMBOL(capi20_put_message); * Return value: CAPI result code */ -u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]) +u16 capi20_get_manufacturer(u32 contr, u8 *buf) { struct capi_ctr *ctr; u16 ret; @@ -920,7 +915,7 @@ EXPORT_SYMBOL(capi20_get_version); * Return value: CAPI result code */ -u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]) +u16 capi20_get_serial(u32 contr, u8 *serial) { struct capi_ctr *ctr; u16 ret; diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index 89cf1d695a01..ff48da61c94c 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -2352,7 +2352,7 @@ static void __exit HFC_cleanup(void) { if (timer_pending(&hfc_tl)) - del_timer_sync(&hfc_tl); + del_timer(&hfc_tl); pci_unregister_driver(&hfc_driver); } diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c index d0b6377b9834..d5bdbaf93a1a 100644 --- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c +++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c @@ -645,19 +645,17 @@ static void release_io(struct inf_hw *hw) { if (hw->cfg.mode) { - if (hw->cfg.mode == AM_MEMIO) { + if (hw->cfg.p) { release_mem_region(hw->cfg.start, 
hw->cfg.size); - if (hw->cfg.p) - iounmap(hw->cfg.p); + iounmap(hw->cfg.p); } else release_region(hw->cfg.start, hw->cfg.size); hw->cfg.mode = AM_NONE; } if (hw->addr.mode) { - if (hw->addr.mode == AM_MEMIO) { + if (hw->addr.p) { release_mem_region(hw->addr.start, hw->addr.size); - if (hw->addr.p) - iounmap(hw->addr.p); + iounmap(hw->addr.p); } else release_region(hw->addr.start, hw->addr.size); hw->addr.mode = AM_NONE; @@ -687,12 +685,9 @@ setup_io(struct inf_hw *hw) (ulong)hw->cfg.start, (ulong)hw->cfg.size); return err; } - hw->cfg.mode = hw->ci->cfg_mode; - if (hw->ci->cfg_mode == AM_MEMIO) { + if (hw->ci->cfg_mode == AM_MEMIO) hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size); - if (!hw->cfg.p) - return -ENOMEM; - } + hw->cfg.mode = hw->ci->cfg_mode; if (debug & DEBUG_HW) pr_notice("%s: IO cfg %lx (%lu bytes) mode%d\n", hw->name, (ulong)hw->cfg.start, @@ -717,12 +712,9 @@ setup_io(struct inf_hw *hw) (ulong)hw->addr.start, (ulong)hw->addr.size); return err; } - hw->addr.mode = hw->ci->addr_mode; - if (hw->ci->addr_mode == AM_MEMIO) { + if (hw->ci->addr_mode == AM_MEMIO) hw->addr.p = ioremap(hw->addr.start, hw->addr.size); - if (!hw->addr.p) - return -ENOMEM; - } + hw->addr.mode = hw->ci->addr_mode; if (debug & DEBUG_HW) pr_notice("%s: IO addr %lx (%lu bytes) mode%d\n", hw->name, (ulong)hw->addr.start, diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c index b4639b0aab3c..cb428b9ee441 100644 --- a/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/drivers/isdn/hardware/mISDN/mISDNipac.c @@ -709,7 +709,7 @@ isac_release(struct isac_hw *isac) { if (isac->type & IPAC_TYPE_ISACX) WriteISAC(isac, ISACX_MASK, 0xff); - else if (isac->type != 0) + else WriteISAC(isac, ISAC_MASK, 0xff); if (isac->dch.timer.function != NULL) { del_timer(&isac->dch.timer); diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index a74741d28ca8..8e2944784e00 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ 
b/drivers/isdn/hardware/mISDN/netjet.c @@ -963,8 +963,8 @@ nj_release(struct tiger_hw *card) nj_disable_hwirq(card); mode_tiger(&card->bc[0], ISDN_P_NONE); mode_tiger(&card->bc[1], ISDN_P_NONE); - spin_unlock_irqrestore(&card->lock, flags); card->isac.release(&card->isac); + spin_unlock_irqrestore(&card->lock, flags); release_region(card->base, card->base_s); card->base_s = 0; } @@ -1114,6 +1114,7 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent) card->typ = NETJET_S_TJ300; card->base = pci_resource_start(pdev, 0); + card->irq = pdev->irq; pci_set_drvdata(pdev, card); err = setup_instance(card); if (err) diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig index fb61181a5c4f..c0730d5c734d 100644 --- a/drivers/isdn/mISDN/Kconfig +++ b/drivers/isdn/mISDN/Kconfig @@ -12,7 +12,6 @@ if MISDN != n config MISDN_DSP tristate "Digital Audio Processing of transparent data" depends on MISDN - select BITREVERSE help Enable support for digital audio processing capability. 
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c index f5a06a6fb297..faf505462a4f 100644 --- a/drivers/isdn/mISDN/core.c +++ b/drivers/isdn/mISDN/core.c @@ -390,7 +390,7 @@ mISDNInit(void) err = mISDN_inittimer(&debug); if (err) goto error2; - err = Isdnl1_Init(&debug); + err = l1_init(&debug); if (err) goto error3; err = Isdnl2_Init(&debug); @@ -404,7 +404,7 @@ mISDNInit(void) error5: Isdnl2_cleanup(); error4: - Isdnl1_cleanup(); + l1_cleanup(); error3: mISDN_timer_cleanup(); error2: @@ -417,7 +417,7 @@ static void mISDN_cleanup(void) { misdn_sock_cleanup(); Isdnl2_cleanup(); - Isdnl1_cleanup(); + l1_cleanup(); mISDN_timer_cleanup(); class_unregister(&mISDN_class); diff --git a/drivers/isdn/mISDN/core.h b/drivers/isdn/mISDN/core.h index 3c039b6ade2e..52695bb81ee7 100644 --- a/drivers/isdn/mISDN/core.h +++ b/drivers/isdn/mISDN/core.h @@ -69,8 +69,8 @@ struct Bprotocol *get_Bprotocol4id(u_int); extern int mISDN_inittimer(u_int *); extern void mISDN_timer_cleanup(void); -extern int Isdnl1_Init(u_int *); -extern void Isdnl1_cleanup(void); +extern int l1_init(u_int *); +extern void l1_cleanup(void); extern int Isdnl2_Init(u_int *); extern void Isdnl2_cleanup(void); diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c index 94d7cc58da64..bebc57b72138 100644 --- a/drivers/isdn/mISDN/layer1.c +++ b/drivers/isdn/mISDN/layer1.c @@ -407,7 +407,7 @@ create_l1(struct dchannel *dch, dchannel_l1callback *dcb) { EXPORT_SYMBOL(create_l1); int -Isdnl1_Init(u_int *deb) +l1_init(u_int *deb) { debug = deb; l1fsm_s.state_count = L1S_STATE_COUNT; @@ -419,7 +419,7 @@ Isdnl1_Init(u_int *deb) } void -Isdnl1_cleanup(void) +l1_cleanup(void) { mISDN_FsmFree(&l1fsm_s); } diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index 01165b04a6b7..d12370352ae3 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c @@ -320,7 +320,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip) usleep_range(3000, 6000); ret = 
lp55xx_read(chip, LP5523_REG_STATUS, &status); if (ret) - goto out; + return ret; status &= LP5523_ENG_STATUS_MASK; if (status != LP5523_ENG_STATUS_MASK) { diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c index 71e04c3a7cc4..d9ecc3c5242e 100644 --- a/drivers/leds/leds-qpnp-wled.c +++ b/drivers/leds/leds-qpnp-wled.c @@ -2309,11 +2309,11 @@ static int qpnp_wled_config(struct qpnp_wled *wled) if (wled->sync_dly_us > QPNP_WLED_SYNC_DLY_MAX_US) wled->sync_dly_us = QPNP_WLED_SYNC_DLY_MAX_US; - reg = wled->sync_dly_us / QPNP_WLED_SYNC_DLY_STEP_US; - mask = QPNP_WLED_SYNC_DLY_MASK; - rc = qpnp_wled_masked_write_reg(wled, - QPNP_WLED_SYNC_DLY_REG(wled->sink_base, i), - mask, reg); + reg = wled->sync_dly_us / QPNP_WLED_SYNC_DLY_STEP_US; + mask = QPNP_WLED_SYNC_DLY_MASK; + rc = qpnp_wled_masked_write_reg(wled, + QPNP_WLED_SYNC_DLY_REG(wled->sink_base, i), + mask, reg); if (rc < 0) return rc; @@ -2321,64 +2321,64 @@ static int qpnp_wled_config(struct qpnp_wled *wled) if (wled->fs_curr_ua > QPNP_WLED_FS_CURR_MAX_UA) wled->fs_curr_ua = QPNP_WLED_FS_CURR_MAX_UA; - if (wled->calc_curr) - reg = (wled->fs_curr_ua + (QPNP_WLED_FS_CURR_STEP_UA - 1)) / - QPNP_WLED_FS_CURR_STEP_UA; - else - reg = wled->fs_curr_ua / QPNP_WLED_FS_CURR_STEP_UA; - mask = QPNP_WLED_FS_CURR_MASK; - rc = qpnp_wled_masked_write_reg(wled, - QPNP_WLED_FS_CURR_REG(wled->sink_base, i), - mask, reg); + reg = wled->fs_curr_ua / QPNP_WLED_FS_CURR_STEP_UA; + mask = QPNP_WLED_FS_CURR_MASK; + rc = qpnp_wled_masked_write_reg(wled, + QPNP_WLED_FS_CURR_REG(wled->sink_base, i), + mask, reg); if (rc < 0) return rc; + if (wled->calc_curr) + temp = (wled->fs_curr_ua + (QPNP_WLED_FS_CURR_STEP_UA - 1)) / + QPNP_WLED_FS_CURR_STEP_UA; + else /* CABC */ - reg = wled->en_cabc ? (1 << QPNP_WLED_CABC_SHIFT) : 0; - mask = QPNP_WLED_CABC_MASK; - rc = qpnp_wled_masked_write_reg(wled, - QPNP_WLED_CABC_REG(wled->sink_base, i), - mask, reg); + reg = wled->en_cabc ? 
(1 << QPNP_WLED_CABC_SHIFT) : 0; + mask = QPNP_WLED_CABC_MASK; + rc = qpnp_wled_masked_write_reg(wled, + QPNP_WLED_CABC_REG(wled->sink_base, i), + mask, reg); if (rc < 0) return rc; - } + } - /* Settings specific to valid sinks */ - for (i = 0; i < wled->num_strings; i++) { - if (wled->strings[i] >= wled->max_strings) { - dev_err(&wled->pdev->dev, "Invalid string number\n"); - return -EINVAL; - } - /* MODULATOR */ + /* Settings specific to valid sinks */ + for (i = 0; i < wled->num_strings; i++) { + if (wled->strings[i] >= wled->max_strings) { + dev_err(&wled->pdev->dev, "Invalid string number\n"); + return -EINVAL; + } + /* MODULATOR */ rc = qpnp_wled_read_reg(wled, QPNP_WLED_MOD_EN_REG(wled->sink_base, i), ®); if (rc < 0) return rc; - reg &= QPNP_WLED_MOD_EN_MASK; - reg |= (QPNP_WLED_MOD_EN << QPNP_WLED_MOD_EN_SHFT); + reg &= QPNP_WLED_MOD_EN_MASK; + reg |= (QPNP_WLED_MOD_EN << QPNP_WLED_MOD_EN_SHFT); - if (wled->dim_mode == QPNP_WLED_DIM_HYBRID) - reg &= QPNP_WLED_GATE_DRV_MASK; - else - reg |= ~QPNP_WLED_GATE_DRV_MASK; + if (wled->dim_mode == QPNP_WLED_DIM_HYBRID) + reg &= QPNP_WLED_GATE_DRV_MASK; + else + reg |= ~QPNP_WLED_GATE_DRV_MASK; rc = qpnp_wled_write_reg(wled, QPNP_WLED_MOD_EN_REG(wled->sink_base, i), reg); if (rc) return rc; - /* SINK EN */ - temp = wled->strings[i] + QPNP_WLED_CURR_SINK_SHIFT; - sink_en |= (1 << temp); - } - mask = QPNP_WLED_CURR_SINK_MASK; - rc = qpnp_wled_masked_write_reg(wled, - QPNP_WLED_CURR_SINK_REG(wled->sink_base), - mask, sink_en); - if (rc < 0) { - dev_err(&wled->pdev->dev, - "Failed to enable WLED sink config rc = %d\n", rc); - return rc; + /* SINK EN */ + temp = wled->strings[i] + QPNP_WLED_CURR_SINK_SHIFT; + sink_en |= (1 << temp); + } + mask = QPNP_WLED_CURR_SINK_MASK; + rc = qpnp_wled_masked_write_reg(wled, + QPNP_WLED_CURR_SINK_REG(wled->sink_base), + mask, sink_en); + if (rc < 0) { + dev_err(&wled->pdev->dev, + "Failed to enable WLED sink config rc = %d\n", rc); + return rc; } } diff --git 
a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index 07f580e0ac15..11c52567304f 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -46,7 +46,6 @@ struct writeset { static void writeset_free(struct writeset *ws) { vfree(ws->bits); - ws->bits = NULL; } static int setup_on_disk_bitset(struct dm_disk_bitset *info, @@ -71,6 +70,8 @@ static size_t bitset_size(unsigned nr_bits) */ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) { + ws->md.nr_bits = nr_blocks; + ws->md.root = INVALID_WRITESET_ROOT; ws->bits = vzalloc(bitset_size(nr_blocks)); if (!ws->bits) { DMERR("%s: couldn't allocate in memory bitset", __func__); @@ -83,14 +84,12 @@ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) /* * Wipes the in-core bitset, and creates a new on disk bitset. */ -static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws, - dm_block_t nr_blocks) +static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws) { int r; - memset(ws->bits, 0, bitset_size(nr_blocks)); + memset(ws->bits, 0, bitset_size(ws->md.nr_bits)); - ws->md.nr_bits = nr_blocks; r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root); if (r) { DMERR("%s: setup_on_disk_bitset failed", __func__); @@ -134,7 +133,7 @@ static int writeset_test_and_set(struct dm_disk_bitset *info, { int r; - if (!test_bit(block, ws->bits)) { + if (!test_and_set_bit(block, ws->bits)) { r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root); if (r) { /* FIXME: fail mode */ @@ -389,7 +388,7 @@ static void ws_dec(void *context, const void *value) static int ws_eq(void *context, const void *value1, const void *value2) { - return !memcmp(value1, value2, sizeof(struct writeset_disk)); + return !memcmp(value1, value2, sizeof(struct writeset_metadata)); } /*----------------------------------------------------------------*/ @@ -565,15 +564,6 @@ static int open_metadata(struct era_metadata *md) } disk = dm_block_data(sblock); - - /* Verify 
the data block size hasn't changed */ - if (le32_to_cpu(disk->data_block_size) != md->block_size) { - DMERR("changing the data block size (from %u to %llu) is not supported", - le32_to_cpu(disk->data_block_size), md->block_size); - r = -EINVAL; - goto bad; - } - r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION, disk->metadata_space_map_root, sizeof(disk->metadata_space_map_root), @@ -585,10 +575,10 @@ static int open_metadata(struct era_metadata *md) setup_infos(md); + md->block_size = le32_to_cpu(disk->data_block_size); md->nr_blocks = le32_to_cpu(disk->nr_blocks); md->current_era = le32_to_cpu(disk->current_era); - ws_unpack(&disk->current_writeset, &md->current_writeset->md); md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root); md->era_array_root = le64_to_cpu(disk->era_array_root); md->metadata_snap = le64_to_cpu(disk->metadata_snap); @@ -757,12 +747,6 @@ static int metadata_digest_lookup_writeset(struct era_metadata *md, ws_unpack(&disk, &d->writeset); d->value = cpu_to_le32(key); - /* - * We initialise another bitset info to avoid any caching side effects - * with the previous one. - */ - dm_disk_bitset_init(md->tm, &d->info); - d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks); d->current_bit = 0; d->step = metadata_digest_transcribe_writeset; @@ -776,6 +760,12 @@ static int metadata_digest_start(struct era_metadata *md, struct digest *d) return 0; memset(d, 0, sizeof(*d)); + + /* + * We initialise another bitset info to avoid any caching side + * effects with the previous one. 
+ */ + dm_disk_bitset_init(md->tm, &d->info); d->step = metadata_digest_lookup_writeset; return 0; @@ -813,8 +803,6 @@ static struct era_metadata *metadata_open(struct block_device *bdev, static void metadata_close(struct era_metadata *md) { - writeset_free(&md->writesets[0]); - writeset_free(&md->writesets[1]); destroy_persistent_data_objects(md); kfree(md); } @@ -852,7 +840,6 @@ static int metadata_resize(struct era_metadata *md, void *arg) r = writeset_alloc(&md->writesets[1], *new_size); if (r) { DMERR("%s: writeset_alloc failed for writeset 1", __func__); - writeset_free(&md->writesets[0]); return r; } @@ -863,8 +850,6 @@ static int metadata_resize(struct era_metadata *md, void *arg) &value, &md->era_array_root); if (r) { DMERR("%s: dm_array_resize failed", __func__); - writeset_free(&md->writesets[0]); - writeset_free(&md->writesets[1]); return r; } @@ -886,6 +871,7 @@ static int metadata_era_archive(struct era_metadata *md) } ws_pack(&md->current_writeset->md, &value); + md->current_writeset->md.root = INVALID_WRITESET_ROOT; keys[0] = md->current_era; __dm_bless_for_disk(&value); @@ -897,7 +883,6 @@ static int metadata_era_archive(struct era_metadata *md) return r; } - md->current_writeset->md.root = INVALID_WRITESET_ROOT; md->archived_writesets = true; return 0; @@ -914,7 +899,7 @@ static int metadata_new_era(struct era_metadata *md) int r; struct writeset *new_writeset = next_writeset(md); - r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks); + r = writeset_init(&md->bitset_info, new_writeset); if (r) { DMERR("%s: writeset_init failed", __func__); return r; @@ -967,7 +952,7 @@ static int metadata_commit(struct era_metadata *md) int r; struct dm_block *sblock; - if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) { + if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) { r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root, &md->current_writeset->md.root); if (r) { @@ -1242,10 +1227,8 @@ static void 
process_deferred_bios(struct era *era) int r; struct bio_list deferred_bios, marked_bios; struct bio *bio; - struct blk_plug plug; bool commit_needed = false; bool failed = false; - struct writeset *ws = era->md->current_writeset; bio_list_init(&deferred_bios); bio_list_init(&marked_bios); @@ -1255,11 +1238,9 @@ static void process_deferred_bios(struct era *era) bio_list_init(&era->deferred_bios); spin_unlock(&era->deferred_lock); - if (bio_list_empty(&deferred_bios)) - return; - while ((bio = bio_list_pop(&deferred_bios))) { - r = writeset_test_and_set(&era->md->bitset_info, ws, + r = writeset_test_and_set(&era->md->bitset_info, + era->md->current_writeset, get_block(era, bio)); if (r < 0) { /* @@ -1267,6 +1248,7 @@ static void process_deferred_bios(struct era *era) * FIXME: finish. */ failed = true; + } else if (r == 0) commit_needed = true; @@ -1282,19 +1264,9 @@ static void process_deferred_bios(struct era *era) if (failed) while ((bio = bio_list_pop(&marked_bios))) bio_io_error(bio); - else { - blk_start_plug(&plug); - while ((bio = bio_list_pop(&marked_bios))) { - /* - * Only update the in-core writeset if the on-disk one - * was updated too. 
- */ - if (commit_needed) - set_bit(get_block(era, bio), ws->bits); + else + while ((bio = bio_list_pop(&marked_bios))) generic_make_request(bio); - } - blk_finish_plug(&plug); - } } static void process_rpc_calls(struct era *era) @@ -1515,6 +1487,15 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv) } era->md = md; + era->nr_blocks = calc_nr_blocks(era); + + r = metadata_resize(era->md, &era->nr_blocks); + if (r) { + ti->error = "couldn't resize metadata"; + era_destroy(era); + return -ENOMEM; + } + era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); if (!era->wq) { ti->error = "could not create workqueue for metadata object"; @@ -1592,24 +1573,16 @@ static int era_preresume(struct dm_target *ti) dm_block_t new_size = calc_nr_blocks(era); if (era->nr_blocks != new_size) { - r = metadata_resize(era->md, &new_size); - if (r) { - DMERR("%s: metadata_resize failed", __func__); - return r; - } - - r = metadata_commit(era->md); - if (r) { - DMERR("%s: metadata_commit failed", __func__); + r = in_worker1(era, metadata_resize, &new_size); + if (r) return r; - } era->nr_blocks = new_size; } start_worker(era); - r = in_worker0(era, metadata_era_rollover); + r = in_worker0(era, metadata_new_era); if (r) { DMERR("%s: metadata_era_rollover failed", __func__); return r; diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 2071c889d34b..f43bc95d90b5 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -524,7 +524,7 @@ static int list_devices(struct dm_ioctl *param, size_t param_size) * Grab our output buffer. 
*/ nl = get_result_buffer(param, param_size, &len); - if (len < needed || len < sizeof(nl->dev)) { + if (len < needed) { param->flags |= DM_BUFFER_FULL_FLAG; goto out; } @@ -1539,7 +1539,6 @@ static int target_message(struct dm_ioctl *param, size_t param_size) if (!argc) { DMWARN("Empty message received."); - r = -EINVAL; goto out_argv; } diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c index dd6ec0742044..0ec61ee586b7 100644 --- a/drivers/md/dm-req-crypt.c +++ b/drivers/md/dm-req-crypt.c @@ -45,9 +45,7 @@ #define KEY_SIZE_XTS 32 #define AES_XTS_IV_LEN 16 #define MAX_MSM_ICE_KEY_LUT_SIZE 32 -#ifndef SECTOR_SIZE #define SECTOR_SIZE 512 -#endif #define MIN_CRYPTO_TRANSFER_SIZE (4 * 1024) #define DM_REQ_CRYPT_ERROR -1 diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 277af895d7c9..55e158553700 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -788,7 +788,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new) static uint32_t __minimum_chunk_size(struct origin *o) { struct dm_snapshot *snap; - unsigned chunk_size = rounddown_pow_of_two(UINT_MAX); + unsigned chunk_size = 0; if (o) list_for_each_entry(snap, &o->snapshots, list) @@ -1264,7 +1264,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (!s->store->chunk_size) { ti->error = "Chunk size not set"; - r = -EINVAL; goto bad_read_metadata; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 3ce2a9b6d8ff..ee8bc7ea2e32 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -394,23 +394,14 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, { int r; dev_t dev; - unsigned int major, minor; - char dummy; struct dm_dev_internal *dd; struct dm_table *t = ti->table; BUG_ON(!t); - if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) { - /* Extract the major/minor numbers */ - dev = MKDEV(major, minor); - if (MAJOR(dev) != major || MINOR(dev) != minor) - return -EOVERFLOW; - } 
else { - dev = dm_get_dev_t(path); - if (!dev) - return -ENODEV; - } + dev = dm_get_dev_t(path); + if (!dev) + return -ENODEV; dd = find_device(&t->devices, dev); if (!dd) { @@ -1164,6 +1155,12 @@ void dm_table_event_callback(struct dm_table *t, void dm_table_event(struct dm_table *t) { + /* + * You can no longer call dm_table_event() from interrupt + * context, use a bottom half instead. + */ + BUG_ON(in_interrupt()); + mutex_lock(&_event_lock); if (t->event_fn) t->event_fn(t->event_context); @@ -1211,46 +1208,6 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) return &t->targets[(KEYS_PER_NODE * n) + k]; } -/* - * type->iterate_devices() should be called when the sanity check needs to - * iterate and check all underlying data devices. iterate_devices() will - * iterate all underlying data devices until it encounters a non-zero return - * code, returned by whether the input iterate_devices_callout_fn, or - * iterate_devices() itself internally. - * - * For some target type (e.g. dm-stripe), one call of iterate_devices() may - * iterate multiple underlying devices internally, in which case a non-zero - * return code returned by iterate_devices_callout_fn will stop the iteration - * in advance. - * - * Cases requiring _any_ underlying device supporting some kind of attribute, - * should use the iteration structure like dm_table_any_dev_attr(), or call - * it directly. @func should handle semantics of positive examples, e.g. - * capable of something. - * - * Cases requiring _all_ underlying devices supporting some kind of attribute, - * should use the iteration structure like dm_table_supports_nowait() or - * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that - * uses an @anti_func that handle semantics of counter examples, e.g. not - * capable of something. 
So: return !dm_table_any_dev_attr(t, anti_func); - */ -static bool dm_table_any_dev_attr(struct dm_table *t, - iterate_devices_callout_fn func) -{ - struct dm_target *ti; - unsigned int i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - - if (ti->type->iterate_devices && - ti->type->iterate_devices(ti, func, NULL)) - return true; - } - - return false; -} - static int count_device(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -1421,12 +1378,12 @@ static bool dm_table_discard_zeroes_data(struct dm_table *t) return true; } -static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && !blk_queue_nonrot(q); + return q && blk_queue_nonrot(q); } static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, @@ -1437,12 +1394,29 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, return q && !blk_queue_add_random(q); } -static int queue_no_sg_merge(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); + return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); +} + +static bool dm_table_all_devices_attribute(struct dm_table *t, + iterate_devices_callout_fn func) +{ + struct dm_target *ti; + unsigned i = 0; + + while (i < dm_table_get_num_targets(t)) { + ti = dm_table_get_target(t, i++); + + if (!ti->type->iterate_devices || + !ti->type->iterate_devices(ti, func, NULL)) + return false; + } + + return true; } static int 
device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, @@ -1535,18 +1509,18 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, q->limits.discard_zeroes_data = 0; /* Ensure that all underlying devices are non-rotational. */ - if (dm_table_any_dev_attr(t, device_is_rotational)) - queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); - else + if (dm_table_all_devices_attribute(t, device_is_nonrot)) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + else + queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); if (!dm_table_supports_write_same(t)) q->limits.max_write_same_sectors = 0; - if (dm_table_any_dev_attr(t, queue_no_sg_merge)) - queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); - else + if (dm_table_all_devices_attribute(t, queue_supports_sg_merge)) queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); + else + queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); dm_table_verify_integrity(t); @@ -1556,7 +1530,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not * have it set. 
*/ - if (blk_queue_add_random(q) && dm_table_any_dev_attr(t, device_is_not_random)) + if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); /* diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index ca61404c404c..1dd667b97530 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -147,6 +147,8 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, block = fec_buffer_rs_block(v, fio, n, i); res = fec_decode_rs8(v, fio, block, &par[offset], neras); if (res < 0) { + dm_bufio_release(buf); + r = res; goto error; } @@ -171,8 +173,6 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, done: r = corrected; error: - dm_bufio_release(buf); - if (r < 0 && neras) DMERR_LIMIT("%s: FEC %llu: failed to correct: %d", v->data_dev->name, (unsigned long long)rsb, r); @@ -272,7 +272,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io, &is_zero) == 0) { /* skip known zero blocks entirely */ if (is_zero) - goto done; + continue; /* * skip if we have already found the theoretical @@ -450,7 +450,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, fio->level++; if (type == DM_VERITY_BLOCK_TYPE_METADATA) - block = block - v->hash_start + v->data_blocks; + block += v->data_blocks; /* * For RS(M, N), the continuous FEC data is divided into blocks of N diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index f1c5e68c6dd9..965f0e74a0d9 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -77,14 +77,6 @@ struct dm_verity_prefetch_work { struct buffer_aux { int hash_verified; }; -/* - * While system shutdown, skip verity work for I/O error. 
- */ -static inline bool verity_is_system_shutting_down(void) -{ - return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF - || system_state == SYSTEM_RESTART; -} /* * Initialize struct buffer_aux for a freshly created buffer. @@ -551,8 +543,7 @@ static void verity_end_io(struct bio *bio) { struct dm_verity_io *io = bio->bi_private; - if (bio->bi_error && - (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) { + if (bio->bi_error && !verity_fec_is_enabled(io->v)) { verity_finish_io(io, bio->bi_error); return; } diff --git a/drivers/md/md.c b/drivers/md/md.c index b05afab6eaf2..6a5d81ca87ee 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -515,17 +515,6 @@ void mddev_init(struct mddev *mddev) } EXPORT_SYMBOL_GPL(mddev_init); -static struct mddev *mddev_find_locked(dev_t unit) -{ - struct mddev *mddev; - - list_for_each_entry(mddev, &all_mddevs, all_mddevs) - if (mddev->unit == unit) - return mddev; - - return NULL; -} - static struct mddev *mddev_find(dev_t unit) { struct mddev *mddev, *new = NULL; @@ -537,13 +526,13 @@ static struct mddev *mddev_find(dev_t unit) spin_lock(&all_mddevs_lock); if (unit) { - mddev = mddev_find_locked(unit); - if (mddev) { - mddev_get(mddev); - spin_unlock(&all_mddevs_lock); - kfree(new); - return mddev; - } + list_for_each_entry(mddev, &all_mddevs, all_mddevs) + if (mddev->unit == unit) { + mddev_get(mddev); + spin_unlock(&all_mddevs_lock); + kfree(new); + return mddev; + } if (new) { list_add(&new->all_mddevs, &all_mddevs); @@ -569,7 +558,12 @@ static struct mddev *mddev_find(dev_t unit) return NULL; } - is_free = !mddev_find_locked(dev); + is_free = 1; + list_for_each_entry(mddev, &all_mddevs, all_mddevs) + if (mddev->unit == dev) { + is_free = 0; + break; + } } new->unit = dev; new->md_minor = MINOR(dev); @@ -7046,7 +7040,8 @@ static int md_open(struct block_device *bdev, fmode_t mode) /* Wait until bdev->bd_disk is definitely gone */ if (work_pending(&mddev->del_work)) 
flush_workqueue(md_misc_wq); - return -EBUSY; + /* Then retry the open from the top */ + return -ERESTARTSYS; } BUG_ON(mddev != bdev->bd_disk->private_data); diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h index 5673f8eb5f88..a240990a7f33 100644 --- a/drivers/md/persistent-data/dm-btree-internal.h +++ b/drivers/md/persistent-data/dm-btree-internal.h @@ -34,12 +34,12 @@ struct node_header { __le32 max_entries; __le32 value_size; __le32 padding; -} __attribute__((packed, aligned(8))); +} __packed; struct btree_node { struct node_header header; __le64 keys[0]; -} __attribute__((packed, aligned(8))); +} __packed; /* diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index 63f2baed3c8a..eff04fa23dfa 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -423,9 +423,9 @@ static int rebalance_children(struct shadow_spine *s, memcpy(n, dm_block_data(child), dm_bm_block_size(dm_tm_get_bm(info->tm))); + dm_tm_unlock(info->tm, child); dm_tm_dec(info->tm, dm_block_location(child)); - dm_tm_unlock(info->tm, child); return 0; } @@ -549,8 +549,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, delete_at(n, index); } - if (!r) - *new_root = shadow_root(&spine); + *new_root = shadow_root(&spine); exit_shadow_spine(&spine); return r; diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index 23b1d22f693c..fa9039a53ae5 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -83,16 +83,14 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, } static int insert_at(size_t value_size, struct btree_node *node, unsigned index, - uint64_t key, void *value) - __dm_written_to_disk(value) + uint64_t key, void *value) + __dm_written_to_disk(value) { uint32_t nr_entries = 
le32_to_cpu(node->header.nr_entries); - uint32_t max_entries = le32_to_cpu(node->header.max_entries); __le64 key_le = cpu_to_le64(key); if (index > nr_entries || - index >= max_entries || - nr_entries >= max_entries) { + index >= le32_to_cpu(node->header.max_entries)) { DMERR("too many entries in btree node for insert"); __dm_unbless_for_disk(value); return -ENOMEM; diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index 6fa4a68e78b0..22729fd92a1b 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c @@ -279,11 +279,6 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result) struct disk_index_entry ie_disk; struct dm_block *blk; - if (b >= ll->nr_blocks) { - DMERR_LIMIT("metadata block out of bounds"); - return -EINVAL; - } - b = do_div(index, ll->entries_per_block); r = ll->load_ie(ll, index, &ie_disk); if (r < 0) @@ -342,8 +337,6 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin, */ begin = do_div(index_begin, ll->entries_per_block); end = do_div(end, ll->entries_per_block); - if (end == 0) - end = ll->entries_per_block; for (i = index_begin; i < index_end; i++, begin = 0) { struct dm_block *blk; diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h index 87e17909ef52..8de63ce39bdd 100644 --- a/drivers/md/persistent-data/dm-space-map-common.h +++ b/drivers/md/persistent-data/dm-space-map-common.h @@ -33,7 +33,7 @@ struct disk_index_entry { __le64 blocknr; __le32 nr_free; __le32 none_free_before; -} __attribute__ ((packed, aligned(8))); +} __packed; #define MAX_METADATA_BITMAPS 255 @@ -43,7 +43,7 @@ struct disk_metadata_index { __le64 blocknr; struct disk_index_entry index[MAX_METADATA_BITMAPS]; -} __attribute__ ((packed, aligned(8))); +} __packed; struct ll_disk; @@ -86,7 +86,7 @@ struct disk_sm_root { __le64 nr_allocated; __le64 bitmap_root; 
__le64 ref_count_root; -} __attribute__ ((packed, aligned(8))); +} __packed; #define ENTRIES_PER_BYTE 4 @@ -94,7 +94,7 @@ struct disk_bitmap_header { __le32 csum; __le32 not_used; __le64 blocknr; -} __attribute__ ((packed, aligned(8))); +} __packed; enum allocation_event { SM_NONE, diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c index e0acae7a3815..bf4c5e2ccb6f 100644 --- a/drivers/md/persistent-data/dm-space-map-disk.c +++ b/drivers/md/persistent-data/dm-space-map-disk.c @@ -171,14 +171,6 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b) * Any block we allocate has to be free in both the old and current ll. */ r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b); - if (r == -ENOSPC) { - /* - * There's no free block between smd->begin and the end of the metadata device. - * We search before smd->begin in case something has been freed. - */ - r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, 0, smd->begin, b); - } - if (r) return r; @@ -207,6 +199,7 @@ static int sm_disk_commit(struct dm_space_map *sm) return r; memcpy(&smd->old_ll, &smd->ll, sizeof(smd->old_ll)); + smd->begin = 0; smd->nr_allocated_this_transaction = 0; r = sm_disk_get_nr_free(sm, &nr_free); diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index 62a4d7da9bd9..967d8f2a731f 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -451,14 +451,6 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b) * Any block we allocate has to be free in both the old and current ll. */ r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b); - if (r == -ENOSPC) { - /* - * There's no free block between smm->begin and the end of the metadata device. 
- * We search before smm->begin in case something has been freed. - */ - r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, 0, smm->begin, b); - } - if (r) return r; @@ -510,6 +502,7 @@ static int sm_metadata_commit(struct dm_space_map *sm) return r; memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll)); + smm->begin = 0; smm->allocated_this_transaction = 0; return 0; diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c index 6d0a0b08853b..df1e8c975cd8 100644 --- a/drivers/media/common/saa7146/saa7146_fops.c +++ b/drivers/media/common/saa7146/saa7146_fops.c @@ -524,7 +524,7 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv) ERR("out of memory. aborting.\n"); kfree(vv); v4l2_ctrl_handler_free(hdl); - return -ENOMEM; + return -1; } saa7146_video_uops.init(dev,vv); diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c index 5cc68144771c..2a8d9a36d6f0 100644 --- a/drivers/media/common/siano/smscoreapi.c +++ b/drivers/media/common/siano/smscoreapi.c @@ -914,7 +914,7 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev, void *buffer, size_t size) { struct sms_firmware *firmware = (struct sms_firmware *) buffer; - struct sms_msg_data5 *msg; + struct sms_msg_data4 *msg; u32 mem_address, calc_checksum = 0; u32 i, *ptr; u8 *payload = firmware->payload; @@ -995,20 +995,24 @@ static int smscore_load_firmware_family2(struct smscore_device_t *coredev, goto exit_fw_download; if (coredev->mode == DEVICE_MODE_NONE) { + struct sms_msg_data *trigger_msg = + (struct sms_msg_data *) msg; + pr_debug("sending MSG_SMS_SWDOWNLOAD_TRIGGER_REQ\n"); SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_SWDOWNLOAD_TRIGGER_REQ, - sizeof(*msg)); + sizeof(struct sms_msg_hdr) + + sizeof(u32) * 5); - msg->msg_data[0] = firmware->start_address; + trigger_msg->msg_data[0] = firmware->start_address; /* Entry point */ - msg->msg_data[1] = 6; /* Priority */ - 
msg->msg_data[2] = 0x200; /* Stack size */ - msg->msg_data[3] = 0; /* Parameter */ - msg->msg_data[4] = 4; /* Task ID */ + trigger_msg->msg_data[1] = 6; /* Priority */ + trigger_msg->msg_data[2] = 0x200; /* Stack size */ + trigger_msg->msg_data[3] = 0; /* Parameter */ + trigger_msg->msg_data[4] = 4; /* Task ID */ - rc = smscore_sendrequest_and_wait(coredev, msg, - msg->x_msg_header.msg_length, + rc = smscore_sendrequest_and_wait(coredev, trigger_msg, + trigger_msg->x_msg_header.msg_length, &coredev->trigger_done); } else { SMS_INIT_MSG(&msg->x_msg_header, MSG_SW_RELOAD_EXEC_REQ, diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h index 55d02c27f124..4cc39e4a8318 100644 --- a/drivers/media/common/siano/smscoreapi.h +++ b/drivers/media/common/siano/smscoreapi.h @@ -636,9 +636,9 @@ struct sms_msg_data2 { u32 msg_data[2]; }; -struct sms_msg_data5 { +struct sms_msg_data4 { struct sms_msg_hdr x_msg_header; - u32 msg_data[5]; + u32 msg_data[4]; }; struct sms_data_download { diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index 735baa74043c..ce4332e80a91 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c @@ -57,7 +57,6 @@ #include #include #include -#include #include #include #include @@ -1351,20 +1350,14 @@ static int dvb_net_do_ioctl(struct file *file, struct net_device *netdev; struct dvb_net_priv *priv_data; struct dvb_net_if *dvbnetif = parg; - int if_num = dvbnetif->if_num; - if (if_num >= DVB_NET_DEVICES_MAX) { + if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX || + !dvbnet->state[dvbnetif->if_num]) { ret = -EINVAL; goto ioctl_error; } - if_num = array_index_nospec(if_num, DVB_NET_DEVICES_MAX); - if (!dvbnet->state[if_num]) { - ret = -EINVAL; - goto ioctl_error; - } - - netdev = dvbnet->device[if_num]; + netdev = dvbnet->device[dvbnetif->if_num]; priv_data = netdev_priv(netdev); dvbnetif->pid=priv_data->pid; @@ -1417,20 +1410,14 @@ static int 
dvb_net_do_ioctl(struct file *file, struct net_device *netdev; struct dvb_net_priv *priv_data; struct __dvb_net_if_old *dvbnetif = parg; - int if_num = dvbnetif->if_num; - - if (if_num >= DVB_NET_DEVICES_MAX) { - ret = -EINVAL; - goto ioctl_error; - } - if_num = array_index_nospec(if_num, DVB_NET_DEVICES_MAX); - if (!dvbnet->state[if_num]) { + if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX || + !dvbnet->state[dvbnetif->if_num]) { ret = -EINVAL; goto ioctl_error; } - netdev = dvbnet->device[if_num]; + netdev = dvbnet->device[dvbnetif->if_num]; priv_data = netdev_priv(netdev); dvbnetif->pid=priv_data->pid; diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index b8af5a3c707f..94c26270fff0 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -4461,10 +4461,8 @@ static struct dvb_frontend *dib8000_init(struct i2c_adapter *i2c_adap, u8 i2c_ad state->timf_default = cfg->pll->timf; - if (dib8000_identify(&state->i2c) == 0) { - kfree(fe); + if (dib8000_identify(&state->i2c) == 0) goto error; - } dibx000_init_i2c_master(&state->i2c_master, DIB8000, state->i2c.adap, state->i2c.addr); diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c index b43135c5a960..e87ac30d7fb8 100644 --- a/drivers/media/dvb-frontends/sp8870.c +++ b/drivers/media/dvb-frontends/sp8870.c @@ -293,9 +293,7 @@ static int sp8870_set_frontend_parameters(struct dvb_frontend *fe) sp8870_writereg(state, 0xc05, reg0xc05); // read status reg in order to clear pending irqs - err = sp8870_readreg(state, 0x200); - if (err < 0) - return err; + sp8870_readreg(state, 0x200); // system controller start sp8870_microcontroller_start(state); diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c index e85777dfe81d..b35400e4e9af 100644 --- a/drivers/media/i2c/adv7511-v4l2.c +++ b/drivers/media/i2c/adv7511-v4l2.c @@ -1570,7 +1570,7 @@ static int adv7511_remove(struct i2c_client 
*client) client->addr << 1, client->adapter->name); adv7511_init_setup(sd); - cancel_delayed_work_sync(&state->edid_handler); + cancel_delayed_work(&state->edid_handler); i2c_unregister_device(state->i2c_edid); i2c_unregister_device(state->i2c_pktmem); destroy_workqueue(state->work_queue); diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c index fb60c9f42cb6..0db15f528ac1 100644 --- a/drivers/media/i2c/mt9p031.c +++ b/drivers/media/i2c/mt9p031.c @@ -81,9 +81,7 @@ #define MT9P031_PIXEL_CLOCK_INVERT (1 << 15) #define MT9P031_PIXEL_CLOCK_SHIFT(n) ((n) << 8) #define MT9P031_PIXEL_CLOCK_DIVIDE(n) ((n) << 0) -#define MT9P031_RESTART 0x0b -#define MT9P031_FRAME_PAUSE_RESTART (1 << 1) -#define MT9P031_FRAME_RESTART (1 << 0) +#define MT9P031_FRAME_RESTART 0x0b #define MT9P031_SHUTTER_DELAY 0x0c #define MT9P031_RST 0x0d #define MT9P031_RST_ENABLE 1 @@ -450,23 +448,9 @@ static int mt9p031_set_params(struct mt9p031 *mt9p031) static int mt9p031_s_stream(struct v4l2_subdev *subdev, int enable) { struct mt9p031 *mt9p031 = to_mt9p031(subdev); - struct i2c_client *client = v4l2_get_subdevdata(subdev); - int val; int ret; if (!enable) { - /* enable pause restart */ - val = MT9P031_FRAME_PAUSE_RESTART; - ret = mt9p031_write(client, MT9P031_RESTART, val); - if (ret < 0) - return ret; - - /* enable restart + keep pause restart set */ - val |= MT9P031_FRAME_RESTART; - ret = mt9p031_write(client, MT9P031_RESTART, val); - if (ret < 0) - return ret; - /* Stop sensor readout */ ret = mt9p031_set_output_control(mt9p031, MT9P031_OUTPUT_CONTROL_CEN, 0); @@ -486,16 +470,6 @@ static int mt9p031_s_stream(struct v4l2_subdev *subdev, int enable) if (ret < 0) return ret; - /* - * - clear pause restart - * - don't clear restart as clearing restart manually can cause - * undefined behavior - */ - val = MT9P031_FRAME_RESTART; - ret = mt9p031_write(client, MT9P031_RESTART, val); - if (ret < 0) - return ret; - return mt9p031_pll_enable(mt9p031); } diff --git 
a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index 376ffa19555d..51b26010403c 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -1393,7 +1393,7 @@ static int __s5c73m3_power_on(struct s5c73m3 *state) s5c73m3_gpio_deassert(state, STBY); usleep_range(100, 200); - s5c73m3_gpio_deassert(state, RSET); + s5c73m3_gpio_deassert(state, RST); usleep_range(50, 100); return 0; @@ -1408,7 +1408,7 @@ static int __s5c73m3_power_off(struct s5c73m3 *state) { int i, ret; - if (s5c73m3_gpio_assert(state, RSET)) + if (s5c73m3_gpio_assert(state, RST)) usleep_range(10, 50); if (s5c73m3_gpio_assert(state, STBY)) @@ -1613,7 +1613,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state) state->mclk_frequency = pdata->mclk_frequency; state->gpio[STBY] = pdata->gpio_stby; - state->gpio[RSET] = pdata->gpio_reset; + state->gpio[RST] = pdata->gpio_reset; return 0; } diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h index 01f57055e20f..13aed59f0f5d 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3.h +++ b/drivers/media/i2c/s5c73m3/s5c73m3.h @@ -361,7 +361,7 @@ struct s5c73m3_ctrls { enum s5c73m3_gpio_id { STBY, - RSET, + RST, GPIO_NUM, }; diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c index 4959edcb76cd..97084237275d 100644 --- a/drivers/media/i2c/s5k4ecgx.c +++ b/drivers/media/i2c/s5k4ecgx.c @@ -177,7 +177,7 @@ static const char * const s5k4ecgx_supply_names[] = { enum s5k4ecgx_gpio_id { STBY, - RSET, + RST, GPIO_NUM, }; @@ -482,7 +482,7 @@ static int __s5k4ecgx_power_on(struct s5k4ecgx *priv) if (s5k4ecgx_gpio_set_value(priv, STBY, priv->gpio[STBY].level)) usleep_range(30, 50); - if (s5k4ecgx_gpio_set_value(priv, RSET, priv->gpio[RSET].level)) + if (s5k4ecgx_gpio_set_value(priv, RST, priv->gpio[RST].level)) usleep_range(30, 50); return 0; @@ -490,7 +490,7 @@ static int __s5k4ecgx_power_on(struct s5k4ecgx *priv) static int 
__s5k4ecgx_power_off(struct s5k4ecgx *priv) { - if (s5k4ecgx_gpio_set_value(priv, RSET, !priv->gpio[RSET].level)) + if (s5k4ecgx_gpio_set_value(priv, RST, !priv->gpio[RST].level)) usleep_range(30, 50); if (s5k4ecgx_gpio_set_value(priv, STBY, !priv->gpio[STBY].level)) @@ -878,7 +878,7 @@ static int s5k4ecgx_config_gpios(struct s5k4ecgx *priv, int ret; priv->gpio[STBY].gpio = -EINVAL; - priv->gpio[RSET].gpio = -EINVAL; + priv->gpio[RST].gpio = -EINVAL; ret = s5k4ecgx_config_gpio(gpio->gpio, gpio->level, "S5K4ECGX_STBY"); @@ -897,7 +897,7 @@ static int s5k4ecgx_config_gpios(struct s5k4ecgx *priv, s5k4ecgx_free_gpios(priv); return ret; } - priv->gpio[RSET] = *gpio; + priv->gpio[RST] = *gpio; if (gpio_is_valid(gpio->gpio)) gpio_set_value(gpio->gpio, 0); diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c index a9052219a278..774e0d0c94cb 100644 --- a/drivers/media/i2c/s5k5baf.c +++ b/drivers/media/i2c/s5k5baf.c @@ -238,7 +238,7 @@ struct s5k5baf_gpio { enum s5k5baf_gpio_id { STBY, - RSET, + RST, NUM_GPIOS, }; @@ -973,7 +973,7 @@ static int s5k5baf_power_on(struct s5k5baf *state) s5k5baf_gpio_deassert(state, STBY); usleep_range(50, 100); - s5k5baf_gpio_deassert(state, RSET); + s5k5baf_gpio_deassert(state, RST); return 0; err_reg_dis: @@ -991,7 +991,7 @@ static int s5k5baf_power_off(struct s5k5baf *state) state->apply_cfg = 0; state->apply_crop = 0; - s5k5baf_gpio_assert(state, RSET); + s5k5baf_gpio_assert(state, RST); s5k5baf_gpio_assert(state, STBY); if (!IS_ERR(state->clock)) diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c index ca1c0568a561..5ac2babe123b 100644 --- a/drivers/media/i2c/s5k6aa.c +++ b/drivers/media/i2c/s5k6aa.c @@ -181,7 +181,7 @@ static const char * const s5k6aa_supply_names[] = { enum s5k6aa_gpio_id { STBY, - RSET, + RST, GPIO_NUM, }; @@ -845,7 +845,7 @@ static int __s5k6aa_power_on(struct s5k6aa *s5k6aa) ret = s5k6aa->s_power(1); usleep_range(4000, 4000); - if (s5k6aa_gpio_deassert(s5k6aa, RSET)) + if 
(s5k6aa_gpio_deassert(s5k6aa, RST)) msleep(20); return ret; @@ -855,7 +855,7 @@ static int __s5k6aa_power_off(struct s5k6aa *s5k6aa) { int ret; - if (s5k6aa_gpio_assert(s5k6aa, RSET)) + if (s5k6aa_gpio_assert(s5k6aa, RST)) usleep_range(100, 150); if (s5k6aa->s_power) { @@ -1514,7 +1514,7 @@ static int s5k6aa_configure_gpios(struct s5k6aa *s5k6aa, int ret; s5k6aa->gpio[STBY].gpio = -EINVAL; - s5k6aa->gpio[RSET].gpio = -EINVAL; + s5k6aa->gpio[RST].gpio = -EINVAL; gpio = &pdata->gpio_stby; if (gpio_is_valid(gpio->gpio)) { @@ -1537,7 +1537,7 @@ static int s5k6aa_configure_gpios(struct s5k6aa *s5k6aa, if (ret < 0) return ret; - s5k6aa->gpio[RSET] = *gpio; + s5k6aa->gpio[RST] = *gpio; } return 0; diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index c3befb3f5dcd..1e95fdb61041 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -1761,7 +1761,6 @@ static int tc358743_probe_of(struct tc358743_state *state) bps_pr_lane = 2 * endpoint->link_frequencies[0]; if (bps_pr_lane < 62500000U || bps_pr_lane > 1000000000U) { dev_err(dev, "unsupported bps per lane: %u bps\n", bps_pr_lane); - ret = -EINVAL; goto disable_clk; } diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c index 2f496c05a331..8b5e0b3a92a0 100644 --- a/drivers/media/pci/b2c2/flexcop-pci.c +++ b/drivers/media/pci/b2c2/flexcop-pci.c @@ -184,8 +184,6 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id) dma_addr_t cur_addr = fc->read_ibi_reg(fc,dma1_008).dma_0x8.dma_cur_addr << 2; u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0; - if (cur_pos > fc_pci->dma[0].size * 2) - goto error; deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, " "last_cur_pos: %08x ", @@ -227,7 +225,6 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id) ret = IRQ_NONE; } -error: spin_unlock_irqrestore(&fc_pci->irq_lock, flags); return ret; } diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c index 
c678d7120727..90fcccc05b56 100644 --- a/drivers/media/pci/bt8xx/bt878.c +++ b/drivers/media/pci/bt8xx/bt878.c @@ -494,9 +494,6 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) btwrite(0, BT878_AINT_MASK); bt878_num++; - if (!bt->tasklet.func) - tasklet_disable(&bt->tasklet); - return 0; fail2: diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c index b43cf85ed5f0..54398d8a4696 100644 --- a/drivers/media/pci/cx25821/cx25821-core.c +++ b/drivers/media/pci/cx25821/cx25821-core.c @@ -990,10 +990,8 @@ int cx25821_riscmem_alloc(struct pci_dev *pci, __le32 *cpu; dma_addr_t dma = 0; - if (risc->cpu && risc->size < size) { + if (NULL != risc->cpu && risc->size < size) pci_free_consistent(pci, risc->size, risc->cpu, risc->dma); - risc->cpu = NULL; - } if (NULL == risc->cpu) { cpu = pci_zalloc_consistent(pci, size, &dma); if (NULL == cpu) diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c index a83ba068b837..3fdbd81b5580 100644 --- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c +++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c @@ -253,24 +253,19 @@ static irqreturn_t netup_unidvb_isr(int irq, void *dev_id) if ((reg40 & AVL_IRQ_ASSERTED) != 0) { /* IRQ is being signaled */ reg_isr = readw(ndev->bmmio0 + REG_ISR); - if (reg_isr & NETUP_UNIDVB_IRQ_SPI) + if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) { + iret = netup_i2c_interrupt(&ndev->i2c[0]); + } else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) { + iret = netup_i2c_interrupt(&ndev->i2c[1]); + } else if (reg_isr & NETUP_UNIDVB_IRQ_SPI) { iret = netup_spi_interrupt(ndev->spi); - else if (!ndev->old_fw) { - if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) { - iret = netup_i2c_interrupt(&ndev->i2c[0]); - } else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) { - iret = netup_i2c_interrupt(&ndev->i2c[1]); - } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) { - iret = netup_dma_interrupt(&ndev->dma[0]); - } else if 
(reg_isr & NETUP_UNIDVB_IRQ_DMA2) { - iret = netup_dma_interrupt(&ndev->dma[1]); - } else if (reg_isr & NETUP_UNIDVB_IRQ_CI) { - iret = netup_ci_interrupt(ndev); - } else { - goto err; - } + } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) { + iret = netup_dma_interrupt(&ndev->dma[0]); + } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) { + iret = netup_dma_interrupt(&ndev->dma[1]); + } else if (reg_isr & NETUP_UNIDVB_IRQ_CI) { + iret = netup_ci_interrupt(ndev); } else { -err: dev_err(&pci_dev->dev, "%s(): unknown interrupt 0x%x\n", __func__, reg_isr); diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c index 019bbc18cede..f33c0de3e849 100644 --- a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c +++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c @@ -184,7 +184,7 @@ int netup_spi_init(struct netup_unidvb_dev *ndev) struct spi_master *master; struct netup_spi *nspi; - master = devm_spi_alloc_master(&ndev->pci_dev->dev, + master = spi_alloc_master(&ndev->pci_dev->dev, sizeof(struct netup_spi)); if (!master) { dev_err(&ndev->pci_dev->dev, @@ -217,7 +217,6 @@ int netup_spi_init(struct netup_unidvb_dev *ndev) ndev->pci_slot, ndev->pci_func); if (!spi_new_device(master, &netup_spi_board)) { - spi_unregister_master(master); ndev->spi = NULL; dev_err(&ndev->pci_dev->dev, "%s(): unable to create SPI device\n", __func__); @@ -236,13 +235,13 @@ void netup_spi_release(struct netup_unidvb_dev *ndev) if (!spi) return; - spi_unregister_master(spi->master); spin_lock_irqsave(&spi->lock, flags); reg = readw(&spi->regs->control_stat); writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat); reg = readw(&spi->regs->control_stat); writew(reg & ~NETUP_SPI_CTRL_IMASK, &spi->regs->control_stat); spin_unlock_irqrestore(&spi->lock, flags); + spi_unregister_master(spi->master); ndev->spi = NULL; } diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c index f0b989900877..1b92d836a564 100644 --- 
a/drivers/media/pci/ngene/ngene-core.c +++ b/drivers/media/pci/ngene/ngene-core.c @@ -402,7 +402,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config) com.cmd.hdr.Opcode = CMD_CONFIGURE_FREE_BUFFER; com.cmd.hdr.Length = 6; - memcpy(&com.cmd.ConfigureFreeBuffers.config, config, 6); + memcpy(&com.cmd.ConfigureBuffers.config, config, 6); com.in_len = 6; com.out_len = 0; diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h index da154c406545..fa30930d7047 100644 --- a/drivers/media/pci/ngene/ngene.h +++ b/drivers/media/pci/ngene/ngene.h @@ -407,14 +407,12 @@ enum _BUFFER_CONFIGS { struct FW_CONFIGURE_FREE_BUFFERS { struct FW_HEADER hdr; - struct { - u8 UVI1_BufferLength; - u8 UVI2_BufferLength; - u8 TVO_BufferLength; - u8 AUD1_BufferLength; - u8 AUD2_BufferLength; - u8 TVA_BufferLength; - } __packed config; + u8 UVI1_BufferLength; + u8 UVI2_BufferLength; + u8 TVO_BufferLength; + u8 AUD1_BufferLength; + u8 AUD2_BufferLength; + u8 TVA_BufferLength; } __attribute__ ((__packed__)); struct FW_CONFIGURE_UART { diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c index ae3b96e9cff3..56b932c97196 100644 --- a/drivers/media/pci/saa7134/saa7134-empress.c +++ b/drivers/media/pci/saa7134/saa7134-empress.c @@ -295,11 +295,8 @@ static int empress_init(struct saa7134_dev *dev) q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->lock = &dev->lock; err = vb2_queue_init(q); - if (err) { - video_device_release(dev->empress_dev); - dev->empress_dev = NULL; + if (err) return err; - } dev->empress_dev->queue = q; video_set_drvdata(dev->empress_dev, dev); diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c index f09f311a316f..343cd75fcd8d 100644 --- a/drivers/media/pci/saa7146/hexium_gemini.c +++ b/drivers/media/pci/saa7146/hexium_gemini.c @@ -296,12 +296,7 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d 
hexium_set_input(hexium, 0); hexium->cur_input = 0; - ret = saa7146_vv_init(dev, &vv_data); - if (ret) { - i2c_del_adapter(&hexium->i2c_adapter); - kfree(hexium); - return ret; - } + saa7146_vv_init(dev, &vv_data); vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input; vv_data.vid_ops.vidioc_g_input = vidioc_g_input; diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c index 2f3b4e01ff28..214396b1ca73 100644 --- a/drivers/media/pci/saa7146/hexium_orion.c +++ b/drivers/media/pci/saa7146/hexium_orion.c @@ -366,16 +366,10 @@ static struct saa7146_ext_vv vv_data; static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info) { struct hexium *hexium = (struct hexium *) dev->ext_priv; - int ret; DEB_EE("\n"); - ret = saa7146_vv_init(dev, &vv_data); - if (ret) { - pr_err("Error in saa7146_vv_init()\n"); - return ret; - } - + saa7146_vv_init(dev, &vv_data); vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input; vv_data.vid_ops.vidioc_g_input = vidioc_g_input; vv_data.vid_ops.vidioc_s_input = vidioc_s_input; diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c index 56691a79ef08..0ca1e07ae783 100644 --- a/drivers/media/pci/saa7146/mxb.c +++ b/drivers/media/pci/saa7146/mxb.c @@ -652,17 +652,16 @@ static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio * struct mxb *mxb = (struct mxb *)dev->ext_priv; DEB_D("VIDIOC_S_AUDIO %d\n", a->index); - if (a->index >= 32 || - !(mxb_inputs[mxb->cur_input].audioset & (1 << a->index))) - return -EINVAL; - - if (mxb->cur_audinput != a->index) { - mxb->cur_audinput = a->index; - tea6420_route(mxb, a->index); - if (mxb->cur_audinput == 0) - mxb_update_audmode(mxb); + if (mxb_inputs[mxb->cur_input].audioset & (1 << a->index)) { + if (mxb->cur_audinput != a->index) { + mxb->cur_audinput = a->index; + tea6420_route(mxb, a->index); + if (mxb->cur_audinput == 0) + mxb_update_audmode(mxb); + } + return 0; } - return 0; + return 
-EINVAL; } #ifdef CONFIG_VIDEO_ADV_DEBUG @@ -694,16 +693,10 @@ static struct saa7146_ext_vv vv_data; static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info) { struct mxb *mxb; - int ret; DEB_EE("dev:%p\n", dev); - ret = saa7146_vv_init(dev, &vv_data); - if (ret) { - ERR("Error in saa7146_vv_init()"); - return ret; - } - + saa7146_vv_init(dev, &vv_data); if (mxb_probe(dev)) { saa7146_vv_release(dev); return -1; diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c index 966de363c575..1b184c39ba97 100644 --- a/drivers/media/pci/saa7164/saa7164-encoder.c +++ b/drivers/media/pci/saa7164/saa7164-encoder.c @@ -1031,7 +1031,7 @@ int saa7164_encoder_register(struct saa7164_port *port) "(errno = %d), NO PCI configuration\n", __func__, result); result = -ENOMEM; - goto fail_pci; + goto failed; } /* Establish encoder defaults here */ @@ -1085,7 +1085,7 @@ int saa7164_encoder_register(struct saa7164_port *port) 100000, ENCODER_DEF_BITRATE); if (hdl->error) { result = hdl->error; - goto fail_hdl; + goto failed; } port->std = V4L2_STD_NTSC_M; @@ -1103,7 +1103,7 @@ int saa7164_encoder_register(struct saa7164_port *port) printk(KERN_INFO "%s: can't allocate mpeg device\n", dev->name); result = -ENOMEM; - goto fail_hdl; + goto failed; } port->v4l_device->ctrl_handler = hdl; @@ -1114,7 +1114,10 @@ int saa7164_encoder_register(struct saa7164_port *port) if (result < 0) { printk(KERN_INFO "%s: can't register mpeg device\n", dev->name); - goto fail_reg; + /* TODO: We're going to leak here if we don't dealloc + The buffers above. The unreg function can't deal wit it. 
+ */ + goto failed; } printk(KERN_INFO "%s: registered device video%d [mpeg]\n", @@ -1136,14 +1139,9 @@ int saa7164_encoder_register(struct saa7164_port *port) saa7164_api_set_encoder(port); saa7164_api_get_encoder(port); - return 0; -fail_reg: - video_device_release(port->v4l_device); - port->v4l_device = NULL; -fail_hdl: - v4l2_ctrl_handler_free(hdl); -fail_pci: + result = 0; +failed: return result; } diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c index 30407955a4d1..4a37a1c51c48 100644 --- a/drivers/media/pci/solo6x10/solo6x10-g723.c +++ b/drivers/media/pci/solo6x10/solo6x10-g723.c @@ -385,7 +385,7 @@ int solo_g723_init(struct solo_dev *solo_dev) ret = snd_ctl_add(card, snd_ctl_new1(&kctl, solo_dev)); if (ret < 0) - goto snd_error; + return ret; ret = solo_snd_pcm_init(solo_dev); if (ret < 0) diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c index 4ffd6e4e54e0..4705a12bbcd1 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2018, 2020 The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c index 21968ef9dc45..2b939555cccb 100644 --- a/drivers/media/platform/s5p-g2d/g2d.c +++ b/drivers/media/platform/s5p-g2d/g2d.c @@ -282,9 +282,6 @@ static int g2d_release(struct file *file) struct g2d_dev *dev = video_drvdata(file); struct g2d_ctx *ctx = fh2ctx(file->private_data); - mutex_lock(&dev->mutex); - v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); - mutex_unlock(&dev->mutex); v4l2_ctrl_handler_free(&ctx->ctrl_handler); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index daa5b4dea092..7727789dbda1 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -1102,7 +1102,7 @@ static int s5p_mfc_probe(struct platform_device *pdev) spin_lock_init(&dev->condlock); dev->plat_dev = pdev; if (!dev->plat_dev) { - mfc_err("No platform data specified\n"); + dev_err(&pdev->dev, "No platform data specified\n"); return -ENODEV; } diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c index 7f6fa77e4775..ffe5531dfc81 100644 --- a/drivers/media/platform/vivid/vivid-vid-out.c +++ b/drivers/media/platform/vivid/vivid-vid-out.c @@ -1011,7 +1011,7 @@ int vivid_vid_out_s_fbuf(struct file *file, void *fh, return -EINVAL; } dev->fbuf_out_flags &= ~(chroma_flags | alpha_flags); - dev->fbuf_out_flags |= a->flags & (chroma_flags | alpha_flags); + dev->fbuf_out_flags = a->flags & (chroma_flags | alpha_flags); return 0; } diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index 24804ce70f52..0836fa442d22 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -24,7 
+24,7 @@ /* driver definitions */ #define DRIVER_AUTHOR "Joonyoung Shim "; -#define DRIVER_CARD "Silicon Labs Si470x FM Radio" +#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver" #define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers" #define DRIVER_VERSION "1.0.2" diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c index 6fd1e4f26f5f..c9347d5aac04 100644 --- a/drivers/media/radio/si470x/radio-si470x-usb.c +++ b/drivers/media/radio/si470x/radio-si470x-usb.c @@ -29,7 +29,7 @@ /* driver definitions */ #define DRIVER_AUTHOR "Tobias Lorenz " -#define DRIVER_CARD "Silicon Labs Si470x FM Radio" +#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver" #define DRIVER_DESC "USB radio driver for Si470x FM Radio Receivers" #define DRIVER_VERSION "1.0.10" diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c index 645ea00c472a..b36e51576f8e 100644 --- a/drivers/media/rc/igorplugusb.c +++ b/drivers/media/rc/igorplugusb.c @@ -73,11 +73,9 @@ static void igorplugusb_irdata(struct igorplugusb *ir, unsigned len) if (start >= len) { dev_err(ir->dev, "receive overflow invalid: %u", overflow); } else { - if (overflow > 0) { + if (overflow > 0) dev_warn(ir->dev, "receive overflow, at least %u lost", overflow); - ir_raw_event_reset(ir->rc); - } do { rawir.duration = ir->buf_in[i] * 85333; diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c index e8bc02ce9b2f..63165d324fff 100644 --- a/drivers/media/rc/ite-cir.c +++ b/drivers/media/rc/ite-cir.c @@ -292,12 +292,6 @@ static irqreturn_t ite_cir_isr(int irq, void *data) /* read the interrupt flags */ iflags = dev->params.get_irq_causes(dev); - /* Check for RX overflow */ - if (iflags & ITE_IRQ_RX_FIFO_OVERRUN) { - dev_warn(&dev->rdev->dev, "receive overflow\n"); - ir_raw_event_reset(dev->rdev); - } - /* check for the receive interrupt */ if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) { /* read the FIFO bytes */ 
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index 71e6d4d935c9..0fba4a2c1602 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -1079,7 +1079,6 @@ static void mceusb_dev_recv(struct urb *urb) case -ECONNRESET: case -ENOENT: case -EILSEQ: - case -EPROTO: case -ESHUTDOWN: usb_unlink_urb(urb); return; @@ -1118,7 +1117,7 @@ static void mceusb_gen1_init(struct mceusb_dev *ir) */ ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0, - data, USB_CTRL_MSG_SZ, 3000); + data, USB_CTRL_MSG_SZ, HZ * 3); dev_dbg(dev, "set address - ret = %d", ret); dev_dbg(dev, "set address - data[0] = %d, data[1] = %d", data[0], data[1]); @@ -1126,20 +1125,20 @@ static void mceusb_gen1_init(struct mceusb_dev *ir) /* set feature: bit rate 38400 bps */ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), USB_REQ_SET_FEATURE, USB_TYPE_VENDOR, - 0xc04e, 0x0000, NULL, 0, 3000); + 0xc04e, 0x0000, NULL, 0, HZ * 3); dev_dbg(dev, "set feature - ret = %d", ret); /* bRequest 4: set char length to 8 bits */ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), 4, USB_TYPE_VENDOR, - 0x0808, 0x0000, NULL, 0, 3000); + 0x0808, 0x0000, NULL, 0, HZ * 3); dev_dbg(dev, "set char length - retB = %d", ret); /* bRequest 2: set handshaking to use DTR/DSR */ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), 2, USB_TYPE_VENDOR, - 0x0000, 0x0100, NULL, 0, 3000); + 0x0000, 0x0100, NULL, 0, HZ * 3); dev_dbg(dev, "set handshake - retC = %d", ret); /* device resume */ diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c index b3810b85e7d5..63dace8198b0 100644 --- a/drivers/media/rc/rc-loopback.c +++ b/drivers/media/rc/rc-loopback.c @@ -55,7 +55,7 @@ static int loop_set_tx_mask(struct rc_dev *dev, u32 mask) if ((mask & (RXMASK_REGULAR | RXMASK_LEARNING)) != mask) { dprintk("invalid tx mask: %u\n", mask); - return 2; + return -EINVAL; } dprintk("setting tx mask: 
%u\n", mask); diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c index 3ad7b67797e9..40f77685cc4a 100644 --- a/drivers/media/rc/sunxi-cir.c +++ b/drivers/media/rc/sunxi-cir.c @@ -132,8 +132,6 @@ static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id) } else if (status & REG_RXINT_RPEI_EN) { ir_raw_event_set_idle(ir->rc, true); ir_raw_event_handle(ir->rc); - } else { - ir_raw_event_handle(ir->rc); } spin_unlock(&ir->ir_lock); diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c index f78caf3c2bbd..504bfbc4027a 100644 --- a/drivers/media/tuners/m88rs6000t.c +++ b/drivers/media/tuners/m88rs6000t.c @@ -535,7 +535,7 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength) PGA2_cri = PGA2_GC >> 2; PGA2_crf = PGA2_GC & 0x03; - for (i = 0; i <= RF_GC && i < ARRAY_SIZE(RFGS); i++) + for (i = 0; i <= RF_GC; i++) RFG += RFGS[i]; if (RF_GC == 0) @@ -547,12 +547,12 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength) if (RF_GC == 3) RFG += 100; - for (i = 0; i <= IF_GC && i < ARRAY_SIZE(IFGS); i++) + for (i = 0; i <= IF_GC; i++) IFG += IFGS[i]; TIAG = TIA_GC * TIA_GS; - for (i = 0; i <= BB_GC && i < ARRAY_SIZE(BBGS); i++) + for (i = 0; i <= BB_GC; i++) BBG += BBGS[i]; PGA2G = PGA2_cri * PGA2_cri_GS + PGA2_crf * PGA2_crf_GS; diff --git a/drivers/media/tuners/msi001.c b/drivers/media/tuners/msi001.c index 64d98517f470..3a12ef35682b 100644 --- a/drivers/media/tuners/msi001.c +++ b/drivers/media/tuners/msi001.c @@ -464,13 +464,6 @@ static int msi001_probe(struct spi_device *spi) V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 1, 1); dev->bandwidth = v4l2_ctrl_new_std(&dev->hdl, &msi001_ctrl_ops, V4L2_CID_RF_TUNER_BANDWIDTH, 200000, 8000000, 1, 200000); - if (dev->hdl.error) { - ret = dev->hdl.error; - dev_err(&spi->dev, "Could not initialize controls\n"); - /* control init failed, free handler */ - goto err_ctrl_handler_free; - } - v4l2_ctrl_auto_cluster(2, &dev->bandwidth_auto, 0, 
false); dev->lna_gain = v4l2_ctrl_new_std(&dev->hdl, &msi001_ctrl_ops, V4L2_CID_RF_TUNER_LNA_GAIN, 0, 1, 1, 1); diff --git a/drivers/media/usb/cpia2/cpia2.h b/drivers/media/usb/cpia2/cpia2.h index 80a7af6482ae..cdef677d57ec 100644 --- a/drivers/media/usb/cpia2/cpia2.h +++ b/drivers/media/usb/cpia2/cpia2.h @@ -442,7 +442,6 @@ int cpia2_send_command(struct camera_data *cam, struct cpia2_command *cmd); int cpia2_do_command(struct camera_data *cam, unsigned int command, unsigned char direction, unsigned char param); -void cpia2_deinit_camera_struct(struct camera_data *cam, struct usb_interface *intf); struct camera_data *cpia2_init_camera_struct(struct usb_interface *intf); int cpia2_init_camera(struct camera_data *cam); int cpia2_allocate_buffers(struct camera_data *cam); diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c index 35c9e00267d5..187012ce444b 100644 --- a/drivers/media/usb/cpia2/cpia2_core.c +++ b/drivers/media/usb/cpia2/cpia2_core.c @@ -2158,18 +2158,6 @@ static void reset_camera_struct(struct camera_data *cam) cam->height = cam->params.roi.height; } -/****************************************************************************** - * - * cpia2_init_camera_struct - * - * Deinitialize camera struct - *****************************************************************************/ -void cpia2_deinit_camera_struct(struct camera_data *cam, struct usb_interface *intf) -{ - v4l2_device_unregister(&cam->v4l2_dev); - kfree(cam); -} - /****************************************************************************** * * cpia2_init_camera_struct diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c index 7bd50feadfe4..76b9cb940b87 100644 --- a/drivers/media/usb/cpia2/cpia2_usb.c +++ b/drivers/media/usb/cpia2/cpia2_usb.c @@ -835,13 +835,15 @@ static int cpia2_usb_probe(struct usb_interface *intf, ret = set_alternate(cam, USBIF_CMDONLY); if (ret < 0) { ERR("%s: usb_set_interface error (ret = %d)\n", 
__func__, ret); - goto alt_err; + kfree(cam); + return ret; } if((ret = cpia2_init_camera(cam)) < 0) { ERR("%s: failed to initialize cpia2 camera (ret = %d)\n", __func__, ret); - goto alt_err; + kfree(cam); + return ret; } LOG(" CPiA Version: %d.%02d (%d.%d)\n", cam->params.version.firmware_revision_hi, @@ -861,14 +863,11 @@ static int cpia2_usb_probe(struct usb_interface *intf, ret = cpia2_register_camera(cam); if (ret < 0) { ERR("%s: Failed to register cpia2 camera (ret = %d)\n", __func__, ret); - goto alt_err; + kfree(cam); + return ret; } return 0; - -alt_err: - cpia2_deinit_camera_struct(cam, intf); - return ret; } /****************************************************************************** diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index b586a23ab588..09c97847bf95 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c @@ -445,7 +445,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap) ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe); if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) - lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa); + lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa), lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index 89d474998e3e..5a503a6bb8c5 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -50,16 +50,7 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req) } else { /* read */ requesttype = (USB_TYPE_VENDOR | USB_DIR_IN); - - /* - * Zero-length transfers must use usb_sndctrlpipe() and - * rtl28xxu_identify_state() uses a zero-length i2c read - * command to determine the chip type. 
- */ - if (req->size) - pipe = usb_rcvctrlpipe(d->udev, 0); - else - pipe = usb_sndctrlpipe(d->udev, 0); + pipe = usb_rcvctrlpipe(d->udev, 0); } ret = usb_control_msg(d->udev, pipe, 0, requesttype, req->value, diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c index c58fb74c3cd7..92e47d6c3ee3 100644 --- a/drivers/media/usb/dvb-usb/az6027.c +++ b/drivers/media/usb/dvb-usb/az6027.c @@ -394,7 +394,6 @@ static struct rc_map_table rc_map_az6027_table[] = { /* remote control stuff (does not work with my box) */ static int az6027_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { - *state = REMOTE_NO_KEY_PRESSED; return 0; } diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index d2e1c126ad99..72bde33211b2 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -1789,7 +1789,7 @@ static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = { .size_of_priv = sizeof(struct cxusb_state), - .num_adapters = 1, + .num_adapters = 2, .adapter = { { .num_frontends = 1, diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c index ee784041f090..49dd3ea2947d 100644 --- a/drivers/media/usb/dvb-usb/dib0700_core.c +++ b/drivers/media/usb/dvb-usb/dib0700_core.c @@ -583,6 +583,8 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) deb_info("the endpoint number (%i) is not correct, use the adapter id instead", adap->fe_adap[0].stream.props.endpoint); if (onoff) st->channel_state |= 1 << (adap->id); + else + st->channel_state |= 1 << ~(adap->id); } else { if (onoff) st->channel_state |= 1 << (adap->fe_adap[0].stream.props.endpoint-2); diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c index 5a1dc0d465d2..7b15aea2723d 100644 --- a/drivers/media/usb/dvb-usb/dibusb-common.c +++ b/drivers/media/usb/dvb-usb/dibusb-common.c @@ -182,7 +182,7 @@ int 
dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val) u8 *buf; int rc; - buf = kzalloc(2, GFP_KERNEL); + buf = kmalloc(2, GFP_KERNEL); if (!buf) return -ENOMEM; diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c index 151212168c9f..97a89ef7e4c1 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c @@ -82,17 +82,11 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) } } - ret = dvb_usb_adapter_stream_init(adap); - if (ret) + if ((ret = dvb_usb_adapter_stream_init(adap)) || + (ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs)) || + (ret = dvb_usb_adapter_frontend_init(adap))) { return ret; - - ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs); - if (ret) - goto dvb_init_err; - - ret = dvb_usb_adapter_frontend_init(adap); - if (ret) - goto frontend_init_err; + } /* use exclusive FE lock if there is multiple shared FEs */ if (adap->fe_adap[1].fe) @@ -112,12 +106,6 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) } return 0; - -frontend_init_err: - dvb_usb_adapter_dvb_exit(adap); -dvb_init_err: - dvb_usb_adapter_stream_exit(adap); - return ret; } static int dvb_usb_adapter_exit(struct dvb_usb_device *d) diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h index dd80b737d4da..ce4c4e3b58bb 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb.h +++ b/drivers/media/usb/dvb-usb/dvb-usb.h @@ -466,8 +466,7 @@ extern int dvb_usb_generic_rw(struct dvb_usb_device *, u8 *, u16, u8 *, u16,int) extern int dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16); /* commonly used remote control parsing */ -int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *d, u8 keybuf[5], - u32 *event, int *state); +extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *); /* commonly used firmware download types and function */ struct hexline { diff --git 
a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c index 7225ae1905eb..5d0384dd45b5 100644 --- a/drivers/media/usb/dvb-usb/gp8psk.c +++ b/drivers/media/usb/dvb-usb/gp8psk.c @@ -163,7 +163,7 @@ out_rel_fw: static int gp8psk_power_ctrl(struct dvb_usb_device *d, int onoff) { - u8 status = 0, buf; + u8 status, buf; int gp_product_id = le16_to_cpu(d->udev->descriptor.idProduct); if (onoff) { diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c index 5b806779e210..eafc5c82467f 100644 --- a/drivers/media/usb/dvb-usb/m920x.c +++ b/drivers/media/usb/dvb-usb/m920x.c @@ -284,13 +284,6 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu /* Should check for ack here, if we knew how. */ } if (msg[i].flags & I2C_M_RD) { - char *read = kmalloc(1, GFP_KERNEL); - if (!read) { - ret = -ENOMEM; - kfree(read); - goto unlock; - } - for (j = 0; j < msg[i].len; j++) { /* Last byte of transaction? * Send STOP, otherwise send ACK. */ @@ -298,12 +291,9 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu if ((ret = m920x_read(d->udev, M9206_I2C, 0x0, 0x20 | stop, - read, 1)) != 0) + &msg[i].buf[j], 1)) != 0) goto unlock; - msg[i].buf[j] = read[0]; } - - kfree(read); } else { for (j = 0; j < msg[i].len; j++) { /* Last byte of transaction? Then send STOP. 
*/ diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c index c570c4af64f3..6c55384e2fca 100644 --- a/drivers/media/usb/dvb-usb/nova-t-usb2.c +++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c @@ -122,7 +122,7 @@ static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state) static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6]) { - int i, ret; + int i; u8 b; mac[0] = 0x00; @@ -131,9 +131,7 @@ static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6]) /* this is a complete guess, but works for my box */ for (i = 136; i < 139; i++) { - ret = dibusb_read_eeprom_byte(d, i, &b); - if (ret) - return ret; + dibusb_read_eeprom_byte(d,i, &b); mac[5 - (i - 136)] = b; } diff --git a/drivers/media/usb/dvb-usb/vp702x.c b/drivers/media/usb/dvb-usb/vp702x.c index 55d515507f0e..ee1e19e36445 100644 --- a/drivers/media/usb/dvb-usb/vp702x.c +++ b/drivers/media/usb/dvb-usb/vp702x.c @@ -294,22 +294,16 @@ static int vp702x_rc_query(struct dvb_usb_device *d, u32 *event, int *state) static int vp702x_read_mac_addr(struct dvb_usb_device *d,u8 mac[6]) { u8 i, *buf; - int ret; struct vp702x_device_state *st = d->priv; mutex_lock(&st->buf_mutex); buf = st->buf; - for (i = 6; i < 12; i++) { - ret = vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1, - &buf[i - 6], 1); - if (ret < 0) - goto err; - } + for (i = 6; i < 12; i++) + vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1, &buf[i - 6], 1); memcpy(mac, buf, 6); -err: mutex_unlock(&st->buf_mutex); - return ret; + return 0; } static int vp702x_frontend_attach(struct dvb_usb_adapter *adap) diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c index f9c627492121..37456079f490 100644 --- a/drivers/media/usb/em28xx/em28xx-core.c +++ b/drivers/media/usb/em28xx/em28xx-core.c @@ -99,7 +99,7 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg, mutex_lock(&dev->ctrl_urb_lock); ret = usb_control_msg(dev->udev, pipe, req, USB_DIR_IN | 
USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0x0000, reg, dev->urb_buf, len, 1000); + 0x0000, reg, dev->urb_buf, len, HZ); if (ret < 0) { if (reg_debug) printk(" failed!\n"); @@ -182,7 +182,7 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf, memcpy(dev->urb_buf, buf, len); ret = usb_control_msg(dev->udev, pipe, req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0x0000, reg, dev->urb_buf, len, 1000); + 0x0000, reg, dev->urb_buf, len, HZ); mutex_unlock(&dev->ctrl_urb_lock); if (ret < 0) diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c index a19c89009bf3..5502a0fb94fd 100644 --- a/drivers/media/usb/em28xx/em28xx-dvb.c +++ b/drivers/media/usb/em28xx/em28xx-dvb.c @@ -1757,7 +1757,6 @@ ret: return result; out_free: - em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE); kfree(dvb); dev->dvb = NULL; goto ret; diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c index 11429bf28c8a..ae1cfa792c58 100644 --- a/drivers/media/usb/go7007/go7007-driver.c +++ b/drivers/media/usb/go7007/go7007-driver.c @@ -698,23 +698,49 @@ struct go7007 *go7007_alloc(const struct go7007_board_info *board, struct device *dev) { struct go7007 *go; + int i; go = kzalloc(sizeof(struct go7007), GFP_KERNEL); if (go == NULL) return NULL; go->dev = dev; go->board_info = board; + go->board_id = 0; go->tuner_type = -1; + go->channel_number = 0; + go->name[0] = 0; mutex_init(&go->hw_lock); init_waitqueue_head(&go->frame_waitq); spin_lock_init(&go->spinlock); go->status = STATUS_INIT; + memset(&go->i2c_adapter, 0, sizeof(go->i2c_adapter)); + go->i2c_adapter_online = 0; + go->interrupt_available = 0; init_waitqueue_head(&go->interrupt_waitq); + go->input = 0; go7007_update_board(go); + go->encoder_h_halve = 0; + go->encoder_v_halve = 0; + go->encoder_subsample = 0; go->format = V4L2_PIX_FMT_MJPEG; go->bitrate = 1500000; go->fps_scale = 1; + go->pali = 0; go->aspect_ratio = GO7007_RATIO_1_1; + 
go->gop_size = 0; + go->ipb = 0; + go->closed_gop = 0; + go->repeat_seqhead = 0; + go->seq_header_enable = 0; + go->gop_header_enable = 0; + go->dvd_mode = 0; + go->interlace_coding = 0; + for (i = 0; i < 4; ++i) + go->modet[i].enable = 0; + for (i = 0; i < 1624; ++i) + go->modet_map[i] = 0; + go->audio_deliver = NULL; + go->audio_enabled = 0; return go; } diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c index a4f64bdb8017..3733c15c753e 100644 --- a/drivers/media/usb/gspca/gspca.c +++ b/drivers/media/usb/gspca/gspca.c @@ -2130,9 +2130,6 @@ out: input_unregister_device(gspca_dev->input_dev); #endif v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler); - v4l2_device_unregister(&gspca_dev->v4l2_dev); - if (sd_desc->probe_error) - sd_desc->probe_error(gspca_dev); kfree(gspca_dev->usb_buf); kfree(gspca_dev); return ret; diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h index bec8fccc2c94..d39adf90303b 100644 --- a/drivers/media/usb/gspca/gspca.h +++ b/drivers/media/usb/gspca/gspca.h @@ -101,7 +101,6 @@ struct sd_desc { cam_cf_op config; /* called on probe */ cam_op init; /* called on probe and resume */ cam_op init_controls; /* called on probe */ - cam_v_op probe_error; /* called if probe failed, do cleanup here */ cam_op start; /* called on stream on after URBs creation */ cam_pkt_op pkt_scan; /* optional operations */ diff --git a/drivers/media/usb/gspca/m5602/m5602_po1030.c b/drivers/media/usb/gspca/m5602/m5602_po1030.c index 971253dafb57..4bf5c43424b7 100644 --- a/drivers/media/usb/gspca/m5602/m5602_po1030.c +++ b/drivers/media/usb/gspca/m5602/m5602_po1030.c @@ -55,7 +55,6 @@ static const struct v4l2_ctrl_config po1030_greenbal_cfg = { int po1030_probe(struct sd *sd) { u8 dev_id_h = 0, i; - int err; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; if (force_sensor) { @@ -74,13 +73,10 @@ int po1030_probe(struct sd *sd) for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) { u8 data = preinit_po1030[i][2]; if 
(preinit_po1030[i][0] == SENSOR) - err = m5602_write_sensor(sd, preinit_po1030[i][1], - &data, 1); + m5602_write_sensor(sd, + preinit_po1030[i][1], &data, 1); else - err = m5602_write_bridge(sd, preinit_po1030[i][1], - data); - if (err < 0) - return err; + m5602_write_bridge(sd, preinit_po1030[i][1], data); } if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1)) diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c index efb5e553b772..a7ae0ec9fa91 100644 --- a/drivers/media/usb/gspca/sq905.c +++ b/drivers/media/usb/gspca/sq905.c @@ -130,7 +130,7 @@ static int sq905_command(struct gspca_dev *gspca_dev, u16 index) } ret = usb_control_msg(gspca_dev->dev, - usb_rcvctrlpipe(gspca_dev->dev, 0), + usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_PING, 0, gspca_dev->usb_buf, 1, @@ -172,7 +172,7 @@ static int sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock) { int ret; - int act_len = 0; + int act_len; gspca_dev->usb_buf[0] = '\0'; if (need_lock) diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c index 40d4c99debb8..7d255529ed4c 100644 --- a/drivers/media/usb/gspca/stv06xx/stv06xx.c +++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c @@ -541,21 +541,12 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, static int stv06xx_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id); -static void stv06xx_probe_error(struct gspca_dev *gspca_dev) -{ - struct sd *sd = (struct sd *)gspca_dev; - - kfree(sd->sensor_priv); - sd->sensor_priv = NULL; -} - /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = stv06xx_config, .init = stv06xx_init, .init_controls = stv06xx_init_controls, - .probe_error = stv06xx_probe_error, .start = stv06xx_start, .stopN = stv06xx_stopN, .pkt_scan = stv06xx_pkt_scan, diff --git a/drivers/media/usb/gspca/sunplus.c 
b/drivers/media/usb/gspca/sunplus.c index 949915734d57..cc3e1478c5a0 100644 --- a/drivers/media/usb/gspca/sunplus.c +++ b/drivers/media/usb/gspca/sunplus.c @@ -255,10 +255,6 @@ static void reg_r(struct gspca_dev *gspca_dev, PERR("reg_r: buffer overflow\n"); return; } - if (len == 0) { - PERR("reg_r: zero-length read\n"); - return; - } if (gspca_dev->usb_err < 0) return; ret = usb_control_msg(gspca_dev->dev, @@ -267,7 +263,7 @@ static void reg_r(struct gspca_dev *gspca_dev, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, - gspca_dev->usb_buf, len, + len ? gspca_dev->usb_buf : NULL, len, 500); if (ret < 0) { pr_err("reg_r err %d\n", ret); @@ -743,7 +739,7 @@ static int sd_start(struct gspca_dev *gspca_dev) case MegaImageVI: reg_w_riv(gspca_dev, 0xf0, 0, 0); spca504B_WaitCmdStatus(gspca_dev); - reg_w_riv(gspca_dev, 0xf0, 4, 0); + reg_r(gspca_dev, 0xf0, 4, 0); spca504B_WaitCmdStatus(gspca_dev); break; default: diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index 9f95b048123d..7b5c493f02b0 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -297,7 +297,7 @@ static int hdpvr_probe(struct usb_interface *interface, /* register v4l2_device early so it can be used for printks */ if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { dev_err(&interface->dev, "v4l2_device_register failed\n"); - goto error_free_dev; + goto error; } mutex_init(&dev->io_mutex); @@ -306,7 +306,7 @@ static int hdpvr_probe(struct usb_interface *interface, dev->usbc_buf = kmalloc(64, GFP_KERNEL); if (!dev->usbc_buf) { v4l2_err(&dev->v4l2_dev, "Out of memory\n"); - goto error_v4l2_unregister; + goto error; } init_waitqueue_head(&dev->wait_buffer); @@ -314,7 +314,7 @@ static int hdpvr_probe(struct usb_interface *interface, dev->workqueue = create_singlethread_workqueue("hdpvr_buffer"); if (!dev->workqueue) - goto err_free_usbc; + goto error; dev->options = hdpvr_default_options; @@ -348,13 
+348,13 @@ static int hdpvr_probe(struct usb_interface *interface, } if (!dev->bulk_in_endpointAddr) { v4l2_err(&dev->v4l2_dev, "Could not find bulk-in endpoint\n"); - goto error_put_usb; + goto error; } /* init the device */ if (hdpvr_device_init(dev)) { v4l2_err(&dev->v4l2_dev, "device init failed\n"); - goto error_put_usb; + goto error; } mutex_lock(&dev->io_mutex); @@ -362,7 +362,7 @@ static int hdpvr_probe(struct usb_interface *interface, mutex_unlock(&dev->io_mutex); v4l2_err(&dev->v4l2_dev, "allocating transfer buffers failed\n"); - goto error_put_usb; + goto error; } mutex_unlock(&dev->io_mutex); @@ -370,7 +370,7 @@ static int hdpvr_probe(struct usb_interface *interface, retval = hdpvr_register_i2c_adapter(dev); if (retval < 0) { v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n"); - goto error_free_buffers; + goto error; } client = hdpvr_register_ir_rx_i2c(dev); @@ -412,20 +412,15 @@ static int hdpvr_probe(struct usb_interface *interface, reg_fail: #if IS_ENABLED(CONFIG_I2C) i2c_del_adapter(&dev->i2c_adapter); -error_free_buffers: #endif - hdpvr_free_buffers(dev); -error_put_usb: - usb_put_dev(dev->udev); - /* Destroy single thread */ - destroy_workqueue(dev->workqueue); -err_free_usbc: - kfree(dev->usbc_buf); -error_v4l2_unregister: - v4l2_device_unregister(&dev->v4l2_dev); -error_free_dev: - kfree(dev); error: + if (dev) { + /* Destroy single thread */ + if (dev->workqueue) + destroy_workqueue(dev->workqueue); + /* this frees allocated memory */ + hdpvr_delete(dev); + } return retval; } diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c index 322238d9aa71..e06a21a4fbd9 100644 --- a/drivers/media/usb/msi2500/msi2500.c +++ b/drivers/media/usb/msi2500/msi2500.c @@ -1254,7 +1254,7 @@ static int msi2500_probe(struct usb_interface *intf, } dev->master = master; - master->bus_num = -1; + master->bus_num = 0; master->num_chipselect = 1; master->transfer_one_message = msi2500_transfer_one_message; 
spi_master_set_devdata(master, dev); diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c index 1d0787f0b9a2..232b0fd3e478 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c @@ -1488,7 +1488,7 @@ static int pvr2_upload_firmware1(struct pvr2_hdw *hdw) for (address = 0; address < fwsize; address += 0x800) { memcpy(fw_ptr, fw_entry->data + address, 0x800); ret += usb_control_msg(hdw->usb_dev, pipe, 0xa0, 0x40, address, - 0, fw_ptr, 0x800, 1000); + 0, fw_ptr, 0x800, HZ); } trace_firmware("Upload done, releasing device's CPU"); @@ -1627,7 +1627,7 @@ int pvr2_upload_firmware2(struct pvr2_hdw *hdw) ((u32 *)fw_ptr)[icnt] = swab32(((u32 *)fw_ptr)[icnt]); ret |= usb_bulk_msg(hdw->usb_dev, pipe, fw_ptr,bcnt, - &actual_length, 1000); + &actual_length, HZ); ret |= (actual_length != bcnt); if (ret) break; fw_done += bcnt; @@ -2731,8 +2731,9 @@ void pvr2_hdw_destroy(struct pvr2_hdw *hdw) pvr2_stream_destroy(hdw->vid_stream); hdw->vid_stream = NULL; } + pvr2_i2c_core_done(hdw); v4l2_device_unregister(&hdw->v4l2_dev); - pvr2_hdw_disconnect(hdw); + pvr2_hdw_remove_usb_stuff(hdw); mutex_lock(&pvr2_unit_mtx); do { if ((hdw->unit_number >= 0) && @@ -2759,7 +2760,6 @@ void pvr2_hdw_disconnect(struct pvr2_hdw *hdw) { pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_disconnect(hdw=%p)",hdw); LOCK_TAKE(hdw->big_lock); - pvr2_i2c_core_done(hdw); LOCK_TAKE(hdw->ctl_lock); pvr2_hdw_remove_usb_stuff(hdw); LOCK_GIVE(hdw->ctl_lock); @@ -3491,7 +3491,7 @@ void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *hdw, 0xa0,0xc0, address,0, hdw->fw_buffer+address, - 0x800,1000); + 0x800,HZ); if (ret < 0) break; } @@ -4017,7 +4017,7 @@ void pvr2_hdw_cpureset_assert(struct pvr2_hdw *hdw,int val) /* Write the CPUCS register on the 8051. The lsb of the register is the reset bit; a 1 asserts reset while a 0 clears it. 
*/ pipe = usb_sndctrlpipe(hdw->usb_dev, 0); - ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0x40,0xe600,0,da,1,1000); + ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0x40,0xe600,0,da,1,HZ); if (ret < 0) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "cpureset_assert(%d) error=%d",val,ret); diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c index bc120c4e59f6..1b6836f15370 100644 --- a/drivers/media/usb/stk1160/stk1160-core.c +++ b/drivers/media/usb/stk1160/stk1160-core.c @@ -76,7 +76,7 @@ int stk1160_read_reg(struct stk1160 *dev, u16 reg, u8 *value) return -ENOMEM; ret = usb_control_msg(dev->udev, pipe, 0x00, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0x00, reg, buf, sizeof(u8), 1000); + 0x00, reg, buf, sizeof(u8), HZ); if (ret < 0) { stk1160_err("read failed on reg 0x%x (%d)\n", reg, ret); @@ -96,7 +96,7 @@ int stk1160_write_reg(struct stk1160 *dev, u16 reg, u16 value) ret = usb_control_msg(dev->udev, pipe, 0x01, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - value, reg, NULL, 0, 1000); + value, reg, NULL, 0, HZ); if (ret < 0) { stk1160_err("write failed on reg 0x%x (%d)\n", reg, ret); diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c index 51ac9d067acf..17ee9cde4156 100644 --- a/drivers/media/usb/stkwebcam/stk-webcam.c +++ b/drivers/media/usb/stkwebcam/stk-webcam.c @@ -1353,7 +1353,7 @@ static int stk_camera_probe(struct usb_interface *interface, if (!dev->isoc_ep) { STK_ERROR("Could not find isoc-in endpoint"); err = -ENODEV; - goto error_put; + goto error; } dev->vsettings.palette = V4L2_PIX_FMT_RGB565; dev->vsettings.mode = MODE_VGA; @@ -1366,12 +1366,10 @@ static int stk_camera_probe(struct usb_interface *interface, err = stk_register_video_device(dev); if (err) - goto error_put; + goto error; return 0; -error_put: - usb_put_intf(interface); error: v4l2_ctrl_handler_free(hdl); v4l2_device_unregister(&dev->v4l2_dev); diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c 
b/drivers/media/usb/tm6000/tm6000-dvb.c index 8afc7de1cf83..87401b18d85a 100644 --- a/drivers/media/usb/tm6000/tm6000-dvb.c +++ b/drivers/media/usb/tm6000/tm6000-dvb.c @@ -158,10 +158,6 @@ static int tm6000_start_stream(struct tm6000_core *dev) if (ret < 0) { printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n", ret, __func__); - - kfree(dvb->bulk_urb->transfer_buffer); - usb_free_urb(dvb->bulk_urb); - dvb->bulk_urb = NULL; return ret; } else printk(KERN_ERR "tm6000: pipe resetted\n"); diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c index 17128340ea5d..5dab02432e82 100644 --- a/drivers/media/usb/usbtv/usbtv-audio.c +++ b/drivers/media/usb/usbtv/usbtv-audio.c @@ -384,7 +384,7 @@ void usbtv_audio_free(struct usbtv *usbtv) cancel_work_sync(&usbtv->snd_trigger); if (usbtv->snd && usbtv->udev) { - snd_card_free_when_closed(usbtv->snd); + snd_card_free(usbtv->snd); usbtv->snd = NULL; } } diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index def22b7fef9c..f353ab569b8e 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -869,10 +869,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id, unsigned int i; extra_size = roundup(extra_size, sizeof(*entity->pads)); - if (num_pads) - num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1; - else - num_inputs = 0; + num_inputs = (type & UVC_TERM_OUTPUT) ? 
num_pads : num_pads - 1; size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads + num_inputs; entity = kzalloc(size, GFP_KERNEL); @@ -888,7 +885,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id, for (i = 0; i < num_inputs; ++i) entity->pads[i].flags = MEDIA_PAD_FL_SINK; - if (!UVC_ENTITY_IS_OTERM(entity) && num_pads) + if (!UVC_ENTITY_IS_OTERM(entity)) entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE; entity->bNrInPins = num_inputs; diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index 8ac231f6b2d1..a0a544628053 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -243,9 +243,7 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream, goto done; /* After the probe, update fmt with the values returned from - * negotiation with the device. Some devices return invalid bFormatIndex - * and bFrameIndex values, in which case we can only assume they have - * accepted the requested format as-is. + * negotiation with the device. 
*/ for (i = 0; i < stream->nformats; ++i) { if (probe->bFormatIndex == stream->format[i].index) { @@ -254,10 +252,11 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream, } } - if (i == stream->nformats) - uvc_trace(UVC_TRACE_FORMAT, - "Unknown bFormatIndex %u, using default\n", + if (i == stream->nformats) { + uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n", probe->bFormatIndex); + return -EINVAL; + } for (i = 0; i < format->nframes; ++i) { if (probe->bFrameIndex == format->frame[i].bFrameIndex) { @@ -266,10 +265,11 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream, } } - if (i == format->nframes) - uvc_trace(UVC_TRACE_FORMAT, - "Unknown bFrameIndex %u, using default\n", + if (i == format->nframes) { + uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n", probe->bFrameIndex); + return -EINVAL; + } fmt->fmt.pix.width = frame->wWidth; fmt->fmt.pix.height = frame->wHeight; @@ -436,13 +436,10 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream, uvc_simplify_fraction(&timeperframe.numerator, &timeperframe.denominator, 8, 333); - if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { + if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) parm->parm.capture.timeperframe = timeperframe; - parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; - } else { + else parm->parm.output.timeperframe = timeperframe; - parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME; - } return 0; } @@ -869,8 +866,8 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input) { struct uvc_fh *handle = fh; struct uvc_video_chain *chain = handle->chain; - u8 *buf; int ret; + u8 i; if (chain->selector == NULL || (chain->dev->quirks & UVC_QUIRK_IGNORE_SELECTOR_UNIT)) { @@ -878,27 +875,22 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input) return 0; } - buf = kmalloc(1, GFP_KERNEL); - if (!buf) - return -ENOMEM; - ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, chain->selector->id, chain->dev->intfnum, 
UVC_SU_INPUT_SELECT_CONTROL, - buf, 1); - if (!ret) - *input = *buf - 1; - - kfree(buf); + &i, 1); + if (ret < 0) + return ret; - return ret; + *input = i - 1; + return 0; } static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input) { struct uvc_fh *handle = fh; struct uvc_video_chain *chain = handle->chain; - u8 *buf; int ret; + u32 i; ret = uvc_acquire_privileges(handle); if (ret < 0) @@ -914,17 +906,10 @@ static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input) if (input >= chain->selector->bNrInPins) return -EINVAL; - buf = kmalloc(1, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - *buf = input + 1; - ret = uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id, - chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL, - buf, 1); - kfree(buf); - - return ret; + i = input + 1; + return uvc_query_ctrl(chain->dev, UVC_SET_CUR, chain->selector->id, + chain->dev->intfnum, UVC_SU_INPUT_SELECT_CONTROL, + &i, 1); } static int uvc_ioctl_queryctrl(struct file *file, void *fh, diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 4e3938f1ad62..a550dbe36dc5 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -89,37 +89,10 @@ int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit, static void uvc_fixup_video_ctrl(struct uvc_streaming *stream, struct uvc_streaming_control *ctrl) { - static const struct usb_device_id elgato_cam_link_4k = { - USB_DEVICE(0x0fd9, 0x0066) - }; struct uvc_format *format = NULL; struct uvc_frame *frame = NULL; unsigned int i; - /* - * The response of the Elgato Cam Link 4K is incorrect: The second byte - * contains bFormatIndex (instead of being the second byte of bmHint). - * The first byte is always zero. The third byte is always 1. - * - * The UVC 1.5 class specification defines the first five bits in the - * bmHint bitfield. The remaining bits are reserved and should be zero. - * Therefore a valid bmHint will be less than 32. 
- * - * Latest Elgato Cam Link 4K firmware as of 2021-03-23 needs this fix. - * MCU: 20.02.19, FPGA: 67 - */ - if (usb_match_one_id(stream->dev->intf, &elgato_cam_link_4k) && - ctrl->bmHint > 255) { - u8 corrected_format_index = ctrl->bmHint >> 8; - - /* uvc_dbg(stream->dev, VIDEO, - "Correct USB video probe response from {bmHint: 0x%04x, bFormatIndex: %u} to {bmHint: 0x%04x, bFormatIndex: %u}\n", - ctrl->bmHint, ctrl->bFormatIndex, - 1, corrected_format_index); */ - ctrl->bmHint = 1; - ctrl->bFormatIndex = corrected_format_index; - } - for (i = 0; i < stream->nformats; ++i) { if (stream->format[i].index == ctrl->bFormatIndex) { format = &stream->format[i]; @@ -1720,10 +1693,6 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags) if (ep == NULL) return -EIO; - /* Reject broken descriptors. */ - if (usb_endpoint_maxp(&ep->desc) == 0) - return -EIO; - ret = uvc_init_video_bulk(stream, ep, gfp_flags); } diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c index 72f839e0116a..2d56cccaa474 100644 --- a/drivers/media/usb/zr364xx/zr364xx.c +++ b/drivers/media/usb/zr364xx/zr364xx.c @@ -1068,7 +1068,6 @@ static int zr364xx_start_readpipe(struct zr364xx_camera *cam) DBG("submitting URB %p\n", pipe_info->stream_urb); retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL); if (retval) { - usb_free_urb(pipe_info->stream_urb); printk(KERN_ERR KBUILD_MODNAME ": start read pipe failed\n"); return retval; } diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c index ce844ecc3340..1d076deb05a9 100644 --- a/drivers/media/v4l2-core/v4l2-fh.c +++ b/drivers/media/v4l2-core/v4l2-fh.c @@ -107,7 +107,6 @@ int v4l2_fh_release(struct file *filp) v4l2_fh_del(fh); v4l2_fh_exit(fh); kfree(fh); - filp->private_data = NULL; } return 0; } diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 9cd9c5d2eafe..2fd84486c054 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c 
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -2802,7 +2802,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, v4l2_kioctl func) { char sbuf[128]; - void *mbuf = NULL, *array_buf = NULL; + void *mbuf = NULL; void *parg = (void *)arg; long err = -EINVAL; bool has_array_args; @@ -2857,14 +2857,20 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, has_array_args = err; if (has_array_args) { - array_buf = kmalloc(array_size, GFP_KERNEL); + /* + * When adding new types of array args, make sure that the + * parent argument to ioctl (which contains the pointer to the + * array) fits into sbuf (so that mbuf will still remain + * unused up to here). + */ + mbuf = kmalloc(array_size, GFP_KERNEL); err = -ENOMEM; - if (array_buf == NULL) + if (NULL == mbuf) goto out_array_args; err = -EFAULT; - if (copy_from_user(array_buf, user_ptr, array_size)) + if (copy_from_user(mbuf, user_ptr, array_size)) goto out_array_args; - *kernel_ptr = array_buf; + *kernel_ptr = mbuf; } /* Handles IOCTL */ @@ -2883,7 +2889,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg, if (has_array_args) { *kernel_ptr = (void __force *)user_ptr; - if (copy_to_user(user_ptr, array_buf, array_size)) + if (copy_to_user(user_ptr, mbuf, array_size)) err = -EFAULT; goto out_array_args; } @@ -2903,7 +2909,6 @@ out_array_args: } out: - kfree(array_buf); kfree(mbuf); return err; } diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index c1faa1332e32..e0041fcfa783 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -1361,7 +1361,6 @@ static int vb2_start_streaming(struct vb2_queue *q) int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) { struct vb2_buffer *vb; - enum vb2_buffer_state orig_state; int ret; if (q->error) { @@ -1391,7 +1390,6 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) * Add to the queued 
buffers list, a buffer will stay on it until * dequeued in dqbuf. */ - orig_state = vb->state; list_add_tail(&vb->queued_entry, &q->queued_list); q->queued_count++; q->waiting_for_buffers = false; @@ -1422,17 +1420,8 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) if (q->streaming && !q->start_streaming_called && q->queued_count >= q->min_buffers_needed) { ret = vb2_start_streaming(q); - if (ret) { - /* - * Since vb2_core_qbuf will return with an error, - * we should return it to state DEQUEUED since - * the error indicates that the buffer wasn't queued. - */ - list_del(&vb->queued_entry); - q->queued_count--; - vb->state = orig_state; + if (ret) return ret; - } } dprintk(1, "qbuf of buffer %d succeeded\n", vb->index); diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c index 258d95b9c0ad..acd1460cf787 100644 --- a/drivers/memory/fsl_ifc.c +++ b/drivers/memory/fsl_ifc.c @@ -107,6 +107,7 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev) iounmap(ctrl->regs); dev_set_drvdata(&dev->dev, NULL); + kfree(ctrl); return 0; } @@ -217,8 +218,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); - fsl_ifc_ctrl_dev = devm_kzalloc(&dev->dev, sizeof(*fsl_ifc_ctrl_dev), - GFP_KERNEL); + fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL); if (!fsl_ifc_ctrl_dev) return -ENOMEM; @@ -275,7 +275,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) ret = fsl_ifc_ctrl_init(fsl_ifc_ctrl_dev); if (ret < 0) - goto err_unmap_nandirq; + goto err; init_waitqueue_head(&fsl_ifc_ctrl_dev->nand_wait); @@ -284,7 +284,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) if (ret != 0) { dev_err(&dev->dev, "failed to install irq (%d)\n", fsl_ifc_ctrl_dev->irq); - goto err_unmap_nandirq; + goto err_irq; } if (fsl_ifc_ctrl_dev->nand_irq) { @@ -293,16 +293,17 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) if (ret != 0) { 
dev_err(&dev->dev, "failed to install irq (%d)\n", fsl_ifc_ctrl_dev->nand_irq); - goto err_free_irq; + goto err_nandirq; } } return 0; -err_free_irq: - free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev); -err_unmap_nandirq: +err_nandirq: + free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev); irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq); +err_irq: + free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev); irq_dispose_mapping(fsl_ifc_ctrl_dev->irq); err: return ret; diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index f6d06d894538..af187c91fc33 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -1005,8 +1005,8 @@ EXPORT_SYMBOL(gpmc_cs_request); void gpmc_cs_free(int cs) { - struct gpmc_cs_data *gpmc; - struct resource *res; + struct gpmc_cs_data *gpmc = &gpmc_cs[cs]; + struct resource *res = &gpmc->mem; spin_lock(&gpmc_mem_lock); if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) { @@ -1015,9 +1015,6 @@ void gpmc_cs_free(int cs) spin_unlock(&gpmc_mem_lock); return; } - gpmc = &gpmc_cs[cs]; - res = &gpmc->mem; - gpmc_cs_disable_mem(cs); if (res->flags) release_resource(res); diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index 2cae85a7ca6d..1041eb7a6167 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -469,6 +469,7 @@ static void memstick_check(struct work_struct *work) host->card = card; if (device_register(&card->dev)) { put_device(&card->dev); + kfree(host->card); host->card = NULL; } } else diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c index d0a4177f034a..24f2f8473dee 100644 --- a/drivers/memstick/core/ms_block.c +++ b/drivers/memstick/core/ms_block.c @@ -1730,7 +1730,7 @@ static int msb_init_card(struct memstick_dev *card) msb->pages_in_block = boot_block->attr.block_size * 2; msb->block_size = msb->page_size * msb->pages_in_block; - if ((size_t)msb->page_size > PAGE_SIZE) { + if (msb->page_size > 
PAGE_SIZE) { /* this isn't supported by linux at all, anyway*/ dbg("device page %d size isn't supported", msb->page_size); return -EINVAL; diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c index ba6cd576e997..08fa6400d255 100644 --- a/drivers/memstick/host/jmb38x_ms.c +++ b/drivers/memstick/host/jmb38x_ms.c @@ -905,7 +905,7 @@ static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt) iounmap(host->addr); err_out_free: - memstick_free_host(msh); + kfree(msh); return NULL; } diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c index 7779aaa6b9b8..ef09ba0289d7 100644 --- a/drivers/memstick/host/r592.c +++ b/drivers/memstick/host/r592.c @@ -763,10 +763,8 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto error3; dev->mmio = pci_ioremap_bar(pdev, 0); - if (!dev->mmio) { - error = -ENOMEM; + if (!dev->mmio) goto error4; - } dev->irq = pdev->irq; spin_lock_init(&dev->irq_lock); @@ -793,14 +791,12 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id) &dev->dummy_dma_page_physical_address, GFP_KERNEL); r592_stop_dma(dev , 0); - error = request_irq(dev->irq, &r592_irq, IRQF_SHARED, - DRV_NAME, dev); - if (error) + if (request_irq(dev->irq, &r592_irq, IRQF_SHARED, + DRV_NAME, dev)) goto error6; r592_update_card_detect(dev); - error = memstick_add_host(host); - if (error) + if (memstick_add_host(host)) goto error7; message("driver successfully loaded"); @@ -842,15 +838,15 @@ static void r592_remove(struct pci_dev *pdev) } memstick_remove_host(dev->host); - if (dev->dummy_dma_page) - dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page, - dev->dummy_dma_page_physical_address); - free_irq(dev->irq, dev); iounmap(dev->mmio); pci_release_regions(pdev); pci_disable_device(pdev); memstick_free_host(dev->host); + + if (dev->dummy_dma_page) + dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page, + dev->dummy_dma_page_physical_address); } 
#ifdef CONFIG_PM_SLEEP diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c index 2992fd94bc0c..2697ffb08009 100644 --- a/drivers/mfd/da9052-i2c.c +++ b/drivers/mfd/da9052-i2c.c @@ -118,7 +118,6 @@ static const struct i2c_device_id da9052_i2c_id[] = { {"da9053-bc", DA9053_BC}, {} }; -MODULE_DEVICE_TABLE(i2c, da9052_i2c_id); #ifdef CONFIG_OF static const struct of_device_id dialog_dt_ids[] = { diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c index 66f9beb2c5ba..b6fd9041f82f 100644 --- a/drivers/mfd/intel-lpss-acpi.c +++ b/drivers/mfd/intel-lpss-acpi.c @@ -53,7 +53,6 @@ static int intel_lpss_acpi_probe(struct platform_device *pdev) { struct intel_lpss_platform_info *info; const struct acpi_device_id *id; - int ret; id = acpi_match_device(intel_lpss_acpi_ids, &pdev->dev); if (!id) @@ -67,14 +66,10 @@ static int intel_lpss_acpi_probe(struct platform_device *pdev) info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); info->irq = platform_get_irq(pdev, 0); - ret = intel_lpss_probe(&pdev->dev, info); - if (ret) - return ret; - pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - return 0; + return intel_lpss_probe(&pdev->dev, info); } static int intel_lpss_acpi_remove(struct platform_device *pdev) diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c index 663a6c1c3d0d..c3f4aab53b07 100644 --- a/drivers/mfd/stmpe-i2c.c +++ b/drivers/mfd/stmpe-i2c.c @@ -107,7 +107,7 @@ static const struct i2c_device_id stmpe_i2c_id[] = { { "stmpe2403", STMPE2403 }, { } }; -MODULE_DEVICE_TABLE(i2c, stmpe_i2c_id); +MODULE_DEVICE_TABLE(i2c, stmpe_id); static struct i2c_driver stmpe_i2c_driver = { .driver = { diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c index 9f7ae1e1ebcd..fd789d2eb0f5 100644 --- a/drivers/mfd/wm831x-auxadc.c +++ b/drivers/mfd/wm831x-auxadc.c @@ -98,10 +98,11 @@ static int wm831x_auxadc_read_irq(struct wm831x *wm831x, wait_for_completion_timeout(&req->done, msecs_to_jiffies(500)); 
mutex_lock(&wm831x->auxadc_lock); + + list_del(&req->list); ret = req->val; out: - list_del(&req->list); mutex_unlock(&wm831x->auxadc_lock); kfree(req); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 3901fb71e96e..688603a9e3c3 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -698,5 +698,4 @@ source "drivers/misc/genwqe/Kconfig" source "drivers/misc/echo/Kconfig" source "drivers/misc/cxl/Kconfig" source "drivers/misc/mm_tuner/Kconfig" -source "drivers/misc/carillon/Kconfig" endmenu diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index df1f309d956b..b8e8770bd044 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -19,7 +19,6 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o obj-$(CONFIG_PHANTOM) += phantom.o obj-$(CONFIG_NFC_PN553_DEVICES) += pn553.o -obj-$(CONFIG_NFC_CARILLON) += carillon/ obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o diff --git a/drivers/misc/carillon/Kconfig b/drivers/misc/carillon/Kconfig deleted file mode 100644 index 3f36035cae0d..000000000000 --- a/drivers/misc/carillon/Kconfig +++ /dev/null @@ -1,5 +0,0 @@ -comment "Sony Carillon NFC driver" - -config NFC_CARILLON - tristate "Sony Carillon protocol driver (I2C) devices" - default n diff --git a/drivers/misc/carillon/Makefile b/drivers/misc/carillon/Makefile deleted file mode 100644 index f219d1ebe699..000000000000 --- a/drivers/misc/carillon/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -obj-$(CONFIG_NFC_CARILLON) := sony_carillon_nfc.o -sony_carillon_nfc-objs := main_module.o cxd224x/cxd224x-i2c.o bd7602/bd7602.o diff --git a/drivers/misc/carillon/bd7602/bd7602.c b/drivers/misc/carillon/bd7602/bd7602.c deleted file mode 100644 index 06272b497df9..000000000000 --- a/drivers/misc/carillon/bd7602/bd7602.c +++ /dev/null @@ -1,311 +0,0 @@ -/* drivers/misc/bd7602.c - * - * Author: Manabu Yoshida - * - * This program is free 
software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2, as - * published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "bd7602.h" - -#define BD7602_DEVICE_NAME "bd7602" -#define BD7602_MODE_MAX 0x04 - -struct bd7602_dev { - struct mutex mutex; - struct i2c_client *client; - struct miscdevice bd7602_device; - u8 mode; -}; - -static ssize_t bd7602_mode_store(struct bd7602_dev *bd7602_dev, void __user *buf) -{ - u8 mode; - int ret; - - ret = copy_from_user(&mode, buf, sizeof(mode)); - if (ret != 0) { - dev_err(&bd7602_dev->client->dev, "%s: Failed to copy data from user %d\n", - __func__, ret); - ret = -EIO; - goto out; - } - - if (BD7602_MODE_MAX < mode) { - dev_err(&bd7602_dev->client->dev, "%s: Invalid mode 0x%02x\n", __func__, mode); - ret = -EINVAL; - goto out; - } - - mutex_lock(&bd7602_dev->mutex); - bd7602_dev->mode = mode; - mutex_unlock(&bd7602_dev->mutex); - dev_info(&bd7602_dev->client->dev, "%s: mode: 0x%02x\n", __func__, bd7602_dev->mode); - -out: - return ret; -} - -static ssize_t bd7602_mode_show(struct bd7602_dev *bd7602_dev, void __user *buf) -{ - int ret; - - if (!buf) { - dev_err(&bd7602_dev->client->dev, "%s: invalid address\n", __func__); - return -EINVAL; - } - - mutex_lock(&bd7602_dev->mutex); - ret = copy_to_user(buf, &bd7602_dev->mode, sizeof(bd7602_dev->mode)); - if (ret != 0) { - dev_err(&bd7602_dev->client->dev, "%s: Failed to copy data to user %d\n", - __func__, ret); - ret = -EIO; - } - mutex_unlock(&bd7602_dev->mutex); - dev_info(&bd7602_dev->client->dev, "%s: mode: 0x%02x\n", __func__, bd7602_dev->mode); - - return ret; -} - -static ssize_t bd7602_value_store(struct bd7602_dev *bd7602_dev, void __user *buf) -{ - u8 data[2]; - int ret; - int len = 2; - - mutex_lock(&bd7602_dev->mutex); - data[0] = bd7602_dev->mode; - 
ret = copy_from_user(&data[1], buf, sizeof(u8)); - if (ret != 0) { - mutex_unlock(&bd7602_dev->mutex); - dev_err(&bd7602_dev->client->dev, "%s: Failed to copy data from user %d\n", - __func__, ret); - ret = -EIO; - goto out; - } - ret = i2c_master_send(bd7602_dev->client, data, len); - if (ret != len) { - mutex_unlock(&bd7602_dev->mutex); - dev_err(&bd7602_dev->client->dev, "%s: Failed to write %d\n", __func__, ret); - ret = -EIO; - goto out; - } - mutex_unlock(&bd7602_dev->mutex); - - dev_info(&bd7602_dev->client->dev, "%s: mode:0x%02x value:0x%02x\n", - __func__, data[0], data[1]); - -out: - return ret > 0 ? 0 : ret; -} - -static ssize_t bd7602_value_show(struct bd7602_dev *bd7602_dev, void __user *buf) -{ - u8 data; - int ret; - - if (!buf) { - dev_err(&bd7602_dev->client->dev, "%s: invalid address\n", __func__); - ret = -EINVAL; - goto out; - } - - mutex_lock(&bd7602_dev->mutex); - ret = i2c_master_send(bd7602_dev->client, &bd7602_dev->mode, - sizeof(bd7602_dev->mode)); - if (sizeof(bd7602_dev->mode) != ret) { - mutex_unlock(&bd7602_dev->mutex); - dev_err(&bd7602_dev->client->dev, "%s: Failed to write %d\n", __func__, ret); - ret = -EIO; - goto out; - } - - ret = i2c_master_recv(bd7602_dev->client, &data, sizeof(data)); - if (sizeof(data) != ret) { - mutex_unlock(&bd7602_dev->mutex); - dev_err(&bd7602_dev->client->dev, "%s: Failed to read %d\n", __func__, ret); - ret = -EIO; - goto out; - } - - ret = copy_to_user(buf, &data, sizeof(data)); - if (ret != 0) { - mutex_unlock(&bd7602_dev->mutex); - dev_err(&bd7602_dev->client->dev, "%s: Failed to copy data to user %d\n", - __func__, ret); - ret = -EIO; - goto out; - } - mutex_unlock(&bd7602_dev->mutex); - - dev_info(&bd7602_dev->client->dev, "%s: mode:0x%02x value:0x%02x\n", - __func__, bd7602_dev->mode, data); - -out: - return ret > 0 ? 
0 : ret; -} - -static long bd7602_dev_unlocked_ioctl(struct file *filp, - unsigned int cmd, unsigned long arg) -{ - struct bd7602_dev *bd7602_dev = filp->private_data; - int ret = -EINVAL; - void __user *data = (void __user *)arg; - - if (!bd7602_dev) { - return -ENODEV; - } - - switch (cmd) { - case BD7602_MODE_STORE: - ret = bd7602_mode_store(bd7602_dev, data); - break; - - case BD7602_MODE_SHOW: - ret = bd7602_mode_show(bd7602_dev, data); - break; - - case BD7602_VALUE_STORE: - ret = bd7602_value_store(bd7602_dev, data); - break; - - case BD7602_VALUE_SHOW: - ret = bd7602_value_show(bd7602_dev, data); - break; - - default: - dev_err(&bd7602_dev->client->dev, - "%s, unknown cmd (%x, %lx)\n", __func__, cmd, arg); - } - - return ret; -} - -static int bd7602_dev_open(struct inode *inode, struct file *filp) -{ - struct bd7602_dev *bd7602_dev = container_of(filp->private_data, - struct bd7602_dev, - bd7602_device); - filp->private_data = bd7602_dev; - - dev_info(&bd7602_dev->client->dev, "%s, open\n", __func__); - - return 0; -} - -const struct file_operations bd7602_dev_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .open = bd7602_dev_open, - .unlocked_ioctl = bd7602_dev_unlocked_ioctl -}; - -static int bd7602_probe(struct i2c_client *client, - const struct i2c_device_id *id) -{ - int ret; - struct bd7602_dev *bd7602_dev; - - dev_info(&client->dev, "%s, probing bd7602 driver flags = %x\n", - __func__, client->flags); - - if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { - dev_err(&client->dev, "need I2C_FUNC_I2C\n"); - ret = -ENODEV; - goto err_exit; - } - - bd7602_dev = kzalloc(sizeof(*bd7602_dev), GFP_KERNEL); - if (bd7602_dev == NULL) { - dev_err(&client->dev, - "failed to allocate memory for module data\n"); - ret = -ENOMEM; - goto err_kzalloc; - } - - bd7602_dev->client = client; - i2c_set_clientdata(client, bd7602_dev); - - mutex_init(&bd7602_dev->mutex); - - bd7602_dev->bd7602_device.minor = MISC_DYNAMIC_MINOR; - 
bd7602_dev->bd7602_device.name = "bd7602-i2c"; - bd7602_dev->bd7602_device.fops = &bd7602_dev_fops; - - ret = misc_register(&bd7602_dev->bd7602_device); - if (ret) { - dev_err(&client->dev, "misc_register failed\n"); - goto err_misc_register; - } - - dev_info(&client->dev, - "%s, probing bd7602 driver exited successfully\n", - __func__); - return 0; - -err_misc_register: - mutex_destroy(&bd7602_dev->mutex); - i2c_set_clientdata(client, NULL); - kzfree(bd7602_dev); -err_kzalloc: -err_exit: - return ret; -} - -static int bd7602_remove(struct i2c_client *client) -{ - struct bd7602_dev *bd7602_dev = i2c_get_clientdata(client); - - mutex_destroy(&bd7602_dev->mutex); - i2c_set_clientdata(client, NULL); - kzfree(bd7602_dev); - - return 0; -} - -static const struct i2c_device_id bd7602_id[] = { - { BD7602_DEVICE_NAME, 0 }, - { } -}; - -static struct of_device_id bd7602_match_table[] = { - { .compatible = "rohm,bd7602", }, - { }, -}; - -static struct i2c_driver bd7602_driver = { - .id_table = bd7602_id, - .probe = bd7602_probe, - .remove = bd7602_remove, - .driver = { - .owner = THIS_MODULE, - .name = BD7602_DEVICE_NAME, - .of_match_table = bd7602_match_table, - }, -}; - -int bd7602_dev_init(void) -{ - return i2c_add_driver(&bd7602_driver); -} - -void bd7602_dev_exit(void) -{ - i2c_del_driver(&bd7602_driver); -} - -MODULE_AUTHOR("Manabu Yoshida "); -MODULE_DESCRIPTION("ROHM BD7602 Power IC Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/carillon/bd7602/bd7602.h b/drivers/misc/carillon/bd7602/bd7602.h deleted file mode 100644 index ccef56d3f69d..000000000000 --- a/drivers/misc/carillon/bd7602/bd7602.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _BD7602_H -#define _BD7602_H - -#define BD7602_MAGIC 'J' - -/* - * BD7602 MODE and VALUE via ioctl - * BD7602_MODE_STORE: store target register address - * BD7602_MODE_SHOW: retrieve current register address - * BD7602_VALUE_STORE: store register value - * BD7602_VALUE_SHOW: retrieve register value - */ -#define 
BD7602_MODE_STORE _IOW(BD7602_MAGIC, 0x01, char *) -#define BD7602_MODE_SHOW _IOR(BD7602_MAGIC, 0x02, char *) -#define BD7602_VALUE_STORE _IOW(BD7602_MAGIC, 0x03, char *) -#define BD7602_VALUE_SHOW _IOR(BD7602_MAGIC, 0x04, char *) - -#endif diff --git a/drivers/misc/carillon/cxd224x/cxd224x-i2c.c b/drivers/misc/carillon/cxd224x/cxd224x-i2c.c deleted file mode 100644 index fd80e4a85b59..000000000000 --- a/drivers/misc/carillon/cxd224x/cxd224x-i2c.c +++ /dev/null @@ -1,843 +0,0 @@ -/* - * cxd224x-i2c.c - cxd224x NFC i2c driver - * - * Copyright (C) 2013- Sony Corporation. - * Copyright (C) 2012 Broadcom Corporation. - * Copyright (C) 2017 Sony Mobile Communications Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * NOTE: This file has been modified by Sony Mobile Communications Inc. - * Modifications are licensed under the License. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "cxd224x.h" - -#define CXD224X_WAKE_LOCK_TIMEOUT 1 /* wake timeout for HOSTINT (sec) */ - -/* do not change below */ -#define MAX_BUFFER_SIZE 780 - -/* Read data */ -#define PACKET_HEADER_SIZE_NCI (3) -#define PACKET_HEADER_SIZE_HCI (3) -#define PACKET_TYPE_NCI (16) -#define PACKET_TYPE_HCIEV (4) -#define MAX_PACKET_SIZE (PACKET_HEADER_SIZE_NCI + 255) - -/* RESET */ -#define RESET_ASSERT_MS (1) - -#define CXD224X_PINCTRL_ACTIVE "felica_active" -#define CXD224X_PINCTRL_SUSPEND "felica_suspend" - -struct cxd224x_dev { - wait_queue_head_t read_wq; - struct mutex read_mutex; - struct i2c_client *client; - struct miscdevice cxd224x_device; - struct cxd224x_platform_data *gpio; - bool irq_enabled; - struct mutex lock; - spinlock_t irq_enabled_lock; - unsigned int users; - unsigned int count_irq; - /* Driver message queue */ - struct workqueue_struct *wqueue; - struct work_struct qmsg; - struct pinctrl *pinctrl; - struct pinctrl_state *gpio_state_active; - struct pinctrl_state *gpio_state_suspend; -}; - -static struct cxd224x_dev *p_cxd224x_dev; -static void cxd224x_enable_pon(struct cxd224x_dev *cxd224x_dev); -static void cxd224x_conditional_disable_pon(struct cxd224x_dev *cxd224x_dev); - -#if defined(CONFIG_NFC_CXD224X_RST) || defined(CONFIG_NFC_CXD224X_RST_MODULE) -static void cxd224x_workqueue(struct work_struct *work) -{ - struct cxd224x_dev *cxd224x_dev = container_of(work, struct cxd224x_dev, qmsg); - unsigned long flags; - - dev_info(&cxd224x_dev->client->dev, "%s, xrst assert\n", __func__); - spin_lock_irqsave(&cxd224x_dev->irq_enabled_lock, flags); - gpio_set_value(cxd224x_dev->gpio->rst_gpio, CXDNFC_RST_ACTIVE); - cxd224x_dev->count_irq=0; /* clear irq */ - spin_unlock_irqrestore(&cxd224x_dev->irq_enabled_lock, flags); - - 
msleep(RESET_ASSERT_MS); - dev_info(&cxd224x_dev->client->dev, "%s, xrst deassert\n", __func__); - gpio_set_value(cxd224x_dev->gpio->rst_gpio, ~CXDNFC_RST_ACTIVE & 0x1); -} - -static int __init init_wqueue(struct cxd224x_dev *cxd224x_dev) -{ - INIT_WORK(&cxd224x_dev->qmsg, cxd224x_workqueue); - cxd224x_dev->wqueue = create_workqueue("cxd224x-i2c_wrokq"); - if (cxd224x_dev->wqueue == NULL) - return -EBUSY; - return 0; -} -#endif /* CONFIG_NFC_CXD224X_RST */ - -static void cxd224x_init_stat(struct cxd224x_dev *cxd224x_dev) -{ - cxd224x_dev->count_irq = 0; -} - -static void cxd224x_disable_irq(struct cxd224x_dev *cxd224x_dev) -{ - unsigned long flags; - spin_lock_irqsave(&cxd224x_dev->irq_enabled_lock, flags); - if (cxd224x_dev->irq_enabled) { - disable_irq_nosync(cxd224x_dev->client->irq); - cxd224x_dev->irq_enabled = false; - } - spin_unlock_irqrestore(&cxd224x_dev->irq_enabled_lock, flags); -} - -static void cxd224x_enable_irq(struct cxd224x_dev *cxd224x_dev) -{ - unsigned long flags; - spin_lock_irqsave(&cxd224x_dev->irq_enabled_lock, flags); - if (!cxd224x_dev->irq_enabled) { - cxd224x_dev->irq_enabled = true; - enable_irq(cxd224x_dev->client->irq); - } - spin_unlock_irqrestore(&cxd224x_dev->irq_enabled_lock, flags); -} - -static irqreturn_t cxd224x_dev_irq_handler(int irq, void *dev_id) -{ - struct cxd224x_dev *cxd224x_dev = dev_id; - unsigned long flags; - - dev_info(&cxd224x_dev->client->dev, "%s\n", __func__); - - spin_lock_irqsave(&cxd224x_dev->irq_enabled_lock, flags); - cxd224x_dev->count_irq++; - if (cxd224x_dev->irq_enabled) { - cxd224x_enable_pon(cxd224x_dev); - } - spin_unlock_irqrestore(&cxd224x_dev->irq_enabled_lock, flags); - - wake_up(&cxd224x_dev->read_wq); - - return IRQ_HANDLED; -} - -static unsigned int cxd224x_dev_poll(struct file *filp, poll_table *wait) -{ - struct cxd224x_dev *cxd224x_dev = filp->private_data; - unsigned int mask = 0; - unsigned long flags; - - poll_wait(filp, &cxd224x_dev->read_wq, wait); - - 
spin_lock_irqsave(&cxd224x_dev->irq_enabled_lock, flags); - if (cxd224x_dev->count_irq > 0) - { - mask |= POLLIN | POLLRDNORM; - } - spin_unlock_irqrestore(&cxd224x_dev->irq_enabled_lock, flags); - - if(mask) - pm_wakeup_event(&cxd224x_dev->client->dev, - jiffies_to_msecs(CXD224X_WAKE_LOCK_TIMEOUT*HZ)); - - return mask; -} - -static ssize_t cxd224x_dev_read(struct file *filp, char __user *buf, - size_t count, loff_t *offset) -{ - struct cxd224x_dev *cxd224x_dev = filp->private_data; - unsigned char tmp[MAX_BUFFER_SIZE]; - int total, len, ret; - unsigned long flags; - - total = 0; - len = 0; - - if (count > MAX_BUFFER_SIZE) - count = MAX_BUFFER_SIZE; - - spin_lock_irqsave(&cxd224x_dev->irq_enabled_lock, flags); - if (cxd224x_dev->count_irq > 0) - cxd224x_dev->count_irq--; - spin_unlock_irqrestore(&cxd224x_dev->irq_enabled_lock, flags); - - mutex_lock(&cxd224x_dev->read_mutex); - - ret = i2c_master_recv(cxd224x_dev->client, tmp, 3); - if (ret == 3 && (tmp[0] != 0xff)) { - total = ret; - - len = tmp[PACKET_HEADER_SIZE_NCI-1]; - - /** make sure full packet fits in the buffer - **/ - if (len > 0 && (len + total) <= count) { - /** read the remainder of the packet. 
- **/ - ret = i2c_master_recv(cxd224x_dev->client, tmp+total, len); - if (ret == len) - total += len; - } - } - - mutex_unlock(&cxd224x_dev->read_mutex); - - if (total > count || copy_to_user(buf, tmp, total)) { - dev_err(&cxd224x_dev->client->dev, - "failed to copy to user space, total = %d\n", total); - total = -EFAULT; - } - - return total; -} - -static ssize_t cxd224x_dev_write(struct file *filp, const char __user *buf, - size_t count, loff_t *offset) -{ - struct cxd224x_dev *cxd224x_dev = filp->private_data; - char tmp[MAX_BUFFER_SIZE]; - int ret; - - if (count > MAX_BUFFER_SIZE) { - dev_err(&cxd224x_dev->client->dev, "out of memory\n"); - return -ENOMEM; - } - - if (copy_from_user(tmp, buf, count)) { - dev_err(&cxd224x_dev->client->dev, - "failed to copy from user space\n"); - return -EFAULT; - } - - mutex_lock(&cxd224x_dev->read_mutex); - /* Write data */ - - ret = i2c_master_send(cxd224x_dev->client, tmp, count); - if (ret != count) { - dev_err(&cxd224x_dev->client->dev, - "failed to write %d\n", ret); - ret = -EIO; - } - mutex_unlock(&cxd224x_dev->read_mutex); - - return ret; -} - -static int cxd224x_dev_open(struct inode *inode, struct file *filp) -{ - int ret = 0; - int call_enable = 0; - struct cxd224x_dev *cxd224x_dev = container_of(filp->private_data, - struct cxd224x_dev, - cxd224x_device); - filp->private_data = cxd224x_dev; - mutex_lock(&cxd224x_dev->lock); - if (!cxd224x_dev->users) - { - cxd224x_init_stat(cxd224x_dev); - call_enable = 1; - } - cxd224x_dev->users++; - if (call_enable) - cxd224x_enable_irq(cxd224x_dev); - mutex_unlock(&cxd224x_dev->lock); - - dev_info(&cxd224x_dev->client->dev, - "open %d,%d users=%d\n", imajor(inode), iminor(inode), cxd224x_dev->users); - - return ret; -} - -static int cxd224x_dev_release(struct inode *inode, struct file *filp) -{ - int ret = 0; - int call_disable = 0; - struct cxd224x_dev *cxd224x_dev = filp->private_data; - - mutex_lock(&cxd224x_dev->lock); - cxd224x_dev->users--; - if (!cxd224x_dev->users) - { 
- call_disable = 1; - } - if (call_disable) - { - cxd224x_disable_irq(cxd224x_dev); - gpio_set_value(cxd224x_dev->gpio->wake_gpio, 0); - } - mutex_unlock(&cxd224x_dev->lock); - - dev_info(&cxd224x_dev->client->dev, - "release %d,%d users=%d\n", imajor(inode), iminor(inode), cxd224x_dev->users); - - return ret; -} - -static void cxd224x_enable_pon(struct cxd224x_dev *cxd224x_dev) -{ - gpio_set_value(cxd224x_dev->gpio->wake_gpio, 1); -} - -static void cxd224x_conditional_disable_pon(struct cxd224x_dev *cxd224x_dev) -{ - unsigned long flag; - - spin_lock_irqsave(&cxd224x_dev->irq_enabled_lock, flag); - /* Do not disable PON when data is available to be read */ - if (cxd224x_dev->count_irq == 0) - gpio_set_value(cxd224x_dev->gpio->wake_gpio, 0); - spin_unlock_irqrestore(&cxd224x_dev->irq_enabled_lock, flag); -} - -static long cxd224x_dev_unlocked_ioctl(struct file *filp, - unsigned int cmd, unsigned long arg) -{ - struct cxd224x_dev *cxd224x_dev = filp->private_data; - - switch (cmd) { - case CXDNFC_RST_CTL: -#if defined(CONFIG_NFC_CXD224X_RST) || defined(CONFIG_NFC_CXD224X_RST_MODULE) - dev_info(&cxd224x_dev->client->dev, "%s, rst arg=%d\n", __func__, (int)arg); - return (queue_work(cxd224x_dev->wqueue, &cxd224x_dev->qmsg) ? 
0 : 1); -#endif - break; - case CXDNFC_POWER_CTL: -#if defined(CONFIG_NFC_CXD224X_VEN) || defined(CONFIG_NFC_CXD224X_VEN_MODULE) - if (arg == 0) { - gpio_set_value(cxd224x_dev->en_gpio, 1); - } else if (arg == 1) { - gpio_set_value(cxd224x_dev->en_gpio, 0); - } else { - /* do nothing */ - } -#else - return 1; /* not support */ -#endif - break; - case CXDNFC_WAKE_CTL: - if (arg == 0) { - /* PON HIGH (normal power mode)*/ - cxd224x_enable_pon(cxd224x_dev); - } else if (arg == 1) { - /* PON LOW (low power mode) */ - cxd224x_conditional_disable_pon(cxd224x_dev); - } else { - /* do nothing */ - } - break; - default: - dev_err(&cxd224x_dev->client->dev, - "%s, unknown cmd (%x, %lx)\n", __func__, cmd, arg); - return 0; - } - - return 0; -} - -static const struct file_operations cxd224x_dev_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .poll = cxd224x_dev_poll, - .read = cxd224x_dev_read, - .write = cxd224x_dev_write, - .open = cxd224x_dev_open, - .release = cxd224x_dev_release, - .unlocked_ioctl = cxd224x_dev_unlocked_ioctl -}; - -static int cxd224x_pinctrl_init(struct device *dev, struct cxd224x_dev *cxd224x_dev) -{ - int ret = 0; - - cxd224x_dev->pinctrl = devm_pinctrl_get(dev); - if (IS_ERR_OR_NULL(cxd224x_dev->pinctrl)) { - dev_err(dev, "error devm_pinctrl_get() failed err:%ld\n", - PTR_ERR(cxd224x_dev->pinctrl)); - ret = PTR_ERR(cxd224x_dev->pinctrl); - goto out; - } - - cxd224x_dev->gpio_state_active = pinctrl_lookup_state( - cxd224x_dev->pinctrl, CXD224X_PINCTRL_ACTIVE); - - if (IS_ERR_OR_NULL(cxd224x_dev->gpio_state_active)) { - ret = PTR_ERR(cxd224x_dev->gpio_state_active); - dev_info(dev, "note pinctrl_lookup_state(%s) err:%d\n", - CXD224X_PINCTRL_ACTIVE, ret); - goto out; - } - - cxd224x_dev->gpio_state_suspend = pinctrl_lookup_state( - cxd224x_dev->pinctrl, CXD224X_PINCTRL_SUSPEND); - - if (IS_ERR_OR_NULL(cxd224x_dev->gpio_state_suspend)) { - ret = PTR_ERR(cxd224x_dev->gpio_state_suspend); - dev_info(dev, "note pinctrl_lookup_state(%s) err:%d\n", - 
CXD224X_PINCTRL_ACTIVE, ret); - goto out; - } - -out: - return ret; -} - -static void cxd224x_pinctrl_select_state(struct device *dev, bool active) -{ - struct cxd224x_dev *cxd224x_dev = dev_get_drvdata(dev); - struct pinctrl_state *pins_state; - const char *pins_state_name; - - if (active) { - pins_state = cxd224x_dev->gpio_state_active; - pins_state_name = CXD224X_PINCTRL_ACTIVE; - } else { - pins_state = cxd224x_dev->gpio_state_suspend; - pins_state_name = CXD224X_PINCTRL_SUSPEND; - } - - if (!IS_ERR_OR_NULL(pins_state)) { - int ret = pinctrl_select_state(cxd224x_dev->pinctrl, - pins_state); - if (ret) - dev_err(dev, "error pinctrl_select_state(%s) err:%d\n", - pins_state_name, ret); - } else { - dev_err(dev, - "error pinctrl state-name:'%s' is not configured\n", - pins_state_name); - } -} - -#if defined(CONFIG_OF) -static int cxd224x_parse_dt(struct device *dev, - struct cxd224x_platform_data *pdata) -{ - int ret=0; - - /*nfc_int*/ - pdata->irq_gpio = of_get_named_gpio_flags(dev->of_node, "sony,nfc_int", 0,NULL); - if (pdata->irq_gpio < 0) { - pr_err( "failed to get \"nfc_int\"\n"); - goto dt_err; - } - -#if defined(CONFIG_NFC_CXD224X_VEN) || defined(CONFIG_NFC_CXD224X_VEN_MODULE) - pdata->en_gpio = of_get_named_gpio_flags(dev->of_node, "sony,nfc_ven", 0,NULL); - if (pdata->en_gpio< 0) { - pr_err( "failed to get \"nfc_ven\"\n"); - goto dt_err; - } -#endif - -#if defined(CONFIG_NFC_CXD224X_RST) || defined(CONFIG_NFC_CXD224X_RST_MODULE) - pdata->rst_gpio = of_get_named_gpio_flags(dev->of_node, "sony,nfc_rst", 0,NULL); - if (pdata->rst_gpio< 0) { - pr_err( "failed to get \"nfc_rst\"\n"); - goto dt_err; - } -#endif - - pdata->wake_gpio = of_get_named_gpio_flags(dev->of_node, "sony,nfc_wake", 0,NULL); - if (pdata->wake_gpio< 0) { - pr_err( "failed to get \"nfc_wake\"\n"); - goto dt_err; - } - return 0; - -dt_err: - return ret; -} -#endif - -static int cxd224x_probe(struct i2c_client *client, - const struct i2c_device_id *id) -{ - int ret; - struct 
cxd224x_platform_data *platform_data=NULL; - struct cxd224x_dev *cxd224x_dev; - struct clk *felica_clk = NULL; - int irq_gpio_ok = 0; -#if defined(CONFIG_NFC_CXD224X_VEN) || defined(CONFIG_NFC_CXD224X_VEN_MODULE) - int en_gpio_ok = 0; -#endif -#if defined(CONFIG_NFC_CXD224X_RST) || defined(CONFIG_NFC_CXD224X_RST_MODULE) - int rst_gpio_ok = 0; -#endif - int wake_gpio_ok = 0; - - if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { - dev_err(&client->dev, "need I2C_FUNC_I2C\n"); - return -ENODEV; - } - -#if defined(CONFIG_OF) - platform_data = kzalloc(sizeof(struct cxd224x_platform_data), - GFP_KERNEL); - if (platform_data == NULL) { - dev_err(&client->dev, "failed to allocate memory\n"); - return -ENOMEM; - } - ret = cxd224x_parse_dt(&client->dev, platform_data); - if (ret) { - dev_err(&client->dev, "failed to parse device tree\n"); - kfree(platform_data); - return -ENODEV; - } -#else - platform_data = client->dev.platform_data; - - dev_info(&client->dev, "%s, probing cxd224x driver flags = %x\n", __func__, client->flags); - if (platform_data == NULL) { - dev_err(&client->dev, "nfc probe fail\n"); - return -ENODEV; - } -#endif - dev_info(&client->dev, "%s, rst_gpio(%d)\n", __func__, platform_data->rst_gpio); - dev_info(&client->dev, "%s, ven_gpio(%d)\n", __func__, platform_data->en_gpio); - dev_info(&client->dev, "%s, irq_gpio(%d)\n", __func__, platform_data->irq_gpio); - dev_info(&client->dev, "%s, wake_gpio(%d)\n", __func__, platform_data->wake_gpio); - - irq_gpio_ok=1; - client->irq = gpio_to_irq(platform_data->irq_gpio); - if (client->irq<0) - { - dev_err(&client->dev, "%s, failed to allocate irq=%d\n", __func__, client->irq); - return -ENODEV; - } - dev_info(&client->dev, "%s, irq(%d)\n", __func__, client->irq); - -#if defined(CONFIG_NFC_CXD224X_VEN) || defined(CONFIG_NFC_CXD224X_VEN_MODULE) - ret = gpio_request_one(platform_data->en_gpio, GPIOF_OUT_INIT_LOW, "nfc_cen"); - if (ret) - goto err_exit; - en_gpio_ok=1; - ret = 
gpio_direction_output(platform_data->en_gpio, 0); - if (ret) - return -ENODEV; -#endif - -#if defined(CONFIG_NFC_CXD224X_RST) || defined(CONFIG_NFC_CXD224X_RST_MODULE) - ret = gpio_request_one(platform_data->rst_gpio, GPIOF_OUT_INIT_HIGH, "nfc_rst"); - if (ret) - goto err_exit; - rst_gpio_ok=1; - ret = gpio_direction_output(platform_data->rst_gpio, ~CXDNFC_RST_ACTIVE & 0x1); - if (ret) - return -ENODEV; - dev_info(&client->dev, "%s, xrst deassert\n", __func__); -#endif - - ret = gpio_request_one(platform_data->wake_gpio, GPIOF_OUT_INIT_LOW, "nfc_wake"); - if (ret) - goto err_exit; - wake_gpio_ok=1; - ret = gpio_direction_output(platform_data->wake_gpio,0); - - cxd224x_dev = kzalloc(sizeof(*cxd224x_dev), GFP_KERNEL); - if (cxd224x_dev == NULL) { - dev_err(&client->dev, - "failed to allocate memory for module data\n"); - ret = -ENOMEM; - goto err_exit; - } - - cxd224x_dev->client = client; - cxd224x_dev->gpio = platform_data; - cxd224x_dev->users =0; - - /* init mutex and queues */ - init_waitqueue_head(&cxd224x_dev->read_wq); - mutex_init(&cxd224x_dev->read_mutex); - mutex_init(&cxd224x_dev->lock); - spin_lock_init(&cxd224x_dev->irq_enabled_lock); - -#if defined(CONFIG_NFC_CXD224X_RST) || defined(CONFIG_NFC_CXD224X_RST_MODULE) - if (init_wqueue(cxd224x_dev) != 0) { - dev_err(&client->dev, "init workqueue failed\n"); - goto err_exit; - } -#endif - - ret = cxd224x_pinctrl_init(&client->dev, cxd224x_dev); - if (ret) { - dev_err(&client->dev, "pinctrl_init failed\n"); - goto err_pinctrl_init; - } - - cxd224x_dev->cxd224x_device.minor = MISC_DYNAMIC_MINOR; - cxd224x_dev->cxd224x_device.name = "cxd224x-i2c"; - cxd224x_dev->cxd224x_device.fops = &cxd224x_dev_fops; - - ret = misc_register(&cxd224x_dev->cxd224x_device); - if (ret) { - dev_err(&client->dev, "misc_register failed\n"); - goto err_misc_register; - } - - felica_clk = clk_get(&client->dev, "felica_clk"); - if (IS_ERR(felica_clk)) { - dev_info(&client->dev, "Couldn't get felica_clk, XTAL should be used\n"); - } 
else { - ret = clk_prepare_enable(felica_clk); - if (ret) { - dev_err(&client->dev, "failed to enable felica_clk\n"); - goto err_clk_enable; - } - } - - /* request irq. the irq is set whenever the chip has data available - * for reading. it is cleared when all data has been read. - */ - dev_info(&client->dev, "requesting IRQ %d\n", client->irq); - cxd224x_dev->irq_enabled = true; - ret = request_irq(client->irq, cxd224x_dev_irq_handler, - IRQF_TRIGGER_FALLING, client->name, cxd224x_dev); - if (ret) { - dev_err(&client->dev, "request_irq failed\n"); - goto err_request_irq_failed; - } - cxd224x_disable_irq(cxd224x_dev); - i2c_set_clientdata(client, cxd224x_dev); - cxd224x_pinctrl_select_state(&client->dev, true); - device_init_wakeup(&client->dev, 1); - p_cxd224x_dev = cxd224x_dev; - - dev_info(&client->dev, - "%s, probing cxd224x driver exited successfully\n", - __func__); - return 0; - -err_request_irq_failed: - misc_deregister(&cxd224x_dev->cxd224x_device); -err_clk_enable: - clk_put(felica_clk); -err_pinctrl_init: -err_misc_register: - mutex_destroy(&cxd224x_dev->read_mutex); - kfree(cxd224x_dev); -err_exit: - if(irq_gpio_ok) - gpio_free(platform_data->irq_gpio); -#if defined(CONFIG_NFC_CXD224X_VEN) || defined(CONFIG_NFC_CXD224X_VEN_MODULE) - if(en_gpio_ok) - gpio_free(platform_data->en_gpio); -#endif -#if defined(CONFIG_NFC_CXD224X_RST) || defined(CONFIG_NFC_CXD224X_RST_MODULE) - if(rst_gpio_ok) - gpio_free(platform_data->rst_gpio); -#endif - if(wake_gpio_ok) - gpio_free(platform_data->wake_gpio); - -#if defined(CONFIG_OF) - if(platform_data) - kfree(platform_data); -#endif - return ret; -} - -static int cxd224x_remove(struct i2c_client *client) -{ - struct cxd224x_dev *cxd224x_dev; - - cxd224x_dev = i2c_get_clientdata(client); - - device_wakeup_disable(&client->dev); - - free_irq(client->irq, cxd224x_dev); - misc_deregister(&cxd224x_dev->cxd224x_device); - mutex_destroy(&cxd224x_dev->read_mutex); - if(cxd224x_dev->gpio) - { - 
gpio_free(cxd224x_dev->gpio->irq_gpio); - gpio_free(cxd224x_dev->gpio->wake_gpio); - -#if defined(CONFIG_NFC_CXD224X_VEN) || defined(CONFIG_NFC_CXD224X_VEN_MODULE) - gpio_free(cxd224x_dev->gpio->en_gpio); -#endif -#if defined(CONFIG_NFC_CXD224X_RST) || defined(CONFIG_NFC_CXD224X_RST_MODULE) - gpio_free(cxd224x_dev->gpio->rst_gpio); -#endif - -#if defined(CONFIG_OF) - - kfree(cxd224x_dev->gpio); -#endif - } - kfree(cxd224x_dev); - - return 0; -} - -#ifdef CONFIG_PM_SLEEP -static int cxd224x_suspend(struct device *dev) -{ - struct cxd224x_dev *cxd224x_dev = dev_get_drvdata(dev); - - dev_info(dev, "%s\n", __func__); - - if (device_may_wakeup(&cxd224x_dev->client->dev)) { - int irq = gpio_to_irq(cxd224x_dev->gpio->irq_gpio); - enable_irq_wake(irq); - } - - cxd224x_pinctrl_select_state(dev, false); - - return 0; -} - -static int cxd224x_resume(struct device *dev) -{ - struct cxd224x_dev *cxd224x_dev = dev_get_drvdata(dev); - - dev_info(dev, "%s\n", __func__); - - if (device_may_wakeup(&cxd224x_dev->client->dev)) { - int irq = gpio_to_irq(cxd224x_dev->gpio->irq_gpio); - disable_irq_wake(irq); - } - - cxd224x_pinctrl_select_state(dev, true); - - return 0; -} - -static const struct dev_pm_ops cxd224x_pm_ops = { - .suspend = cxd224x_suspend, - .resume = cxd224x_resume, -}; -#endif - -static const struct i2c_device_id cxd224x_id[] = { - {"cxd224x-i2c", 0}, - {} -}; -MODULE_DEVICE_TABLE(i2c, cxd224x_id); - -static struct i2c_driver cxd224x_driver = { - .id_table = cxd224x_id, - .probe = cxd224x_probe, - .remove = cxd224x_remove, - .driver = { - .owner = THIS_MODULE, - .name = "cxd224x-i2c", - }, -}; - -static const struct platform_device_id cxd224x_pm_ops_id[] = { - { "cxd224x-pm-ops", 0 }, - { } -}; - -static const struct of_device_id cxd224x_pm_ops_match_table[] = { - { .compatible = "sony,cxd224x-pm-ops" }, - { } -}; - -static int cxd224x_pm_ops_probe(struct platform_device *pdev) -{ - dev_info(&pdev->dev, "%s, probing cxd224x PM OPS driver\n", __func__); - - if 
(!p_cxd224x_dev) - return -ENODEV; - - platform_set_drvdata(pdev, p_cxd224x_dev); - - dev_info(&pdev->dev, - "%s, probing cxd224x PM OPS driver successfully\n", __func__); - - return 0; -} - -static struct platform_driver cxd224x_pm_ops_driver = { - .id_table = cxd224x_pm_ops_id, - .probe = cxd224x_pm_ops_probe, - .driver = { - .name = "cxd224x-pm-ops", - .of_match_table = cxd224x_pm_ops_match_table, -#ifdef CONFIG_PM - .pm = &cxd224x_pm_ops, -#endif - }, -}; - -/* - * module load/unload record keeping - */ - -int cxd224x_dev_init(void) -{ - int ret; - - ret = i2c_add_driver(&cxd224x_driver); - if (ret) - goto exit; - - ret = platform_driver_register(&cxd224x_pm_ops_driver); - if (ret) - goto exit_del_i2c_driver; - - return 0; - -exit_del_i2c_driver: - i2c_del_driver(&cxd224x_driver); - -exit: - return ret; -} - -void cxd224x_dev_exit(void) -{ - if (p_cxd224x_dev) - p_cxd224x_dev = NULL; - - i2c_del_driver(&cxd224x_driver); - platform_driver_unregister(&cxd224x_pm_ops_driver); -} - -MODULE_AUTHOR("Sony"); -MODULE_DESCRIPTION("NFC cxd224x driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/carillon/cxd224x/cxd224x.h b/drivers/misc/carillon/cxd224x/cxd224x.h deleted file mode 100644 index 6e2d171c8ce4..000000000000 --- a/drivers/misc/carillon/cxd224x/cxd224x.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * cxd224x-i2c.c - cxd224x NFC driver - * - * Copyright (C) 2013- Sony Corporation. - * Copyright (C) 2012 Broadcom Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _CXD224X_H -#define _CXD224X_H - -#define CXDNFC_MAGIC 'S' -/* - * CXDNFC power control via ioctl - * CXDNFC_POWER_CTL(0): power off - * CXDNFC_POWER_CTL(1): power on - * CXDNFC_WAKE_CTL(0): PON HIGH (normal power mode) - * CXDNFC_WAKE_CTL(1): PON LOW (low power mode) - * CXDNFC_WAKE_RST(): assert XRST - */ -#define CXDNFC_POWER_CTL _IO(CXDNFC_MAGIC, 0x01) -#define CXDNFC_WAKE_CTL _IO(CXDNFC_MAGIC, 0x02) -#define CXDNFC_RST_CTL _IO(CXDNFC_MAGIC, 0x03) - -#define CXDNFC_RST_ACTIVE 1 /* ActiveHi = 1, ActiveLow = 0 */ - -struct cxd224x_platform_data { - unsigned int irq_gpio; - unsigned int en_gpio; - unsigned int wake_gpio; - unsigned int rst_gpio; -}; - -#endif diff --git a/drivers/misc/carillon/main_module.c b/drivers/misc/carillon/main_module.c deleted file mode 100644 index cb885f84d99d..000000000000 --- a/drivers/misc/carillon/main_module.c +++ /dev/null @@ -1,54 +0,0 @@ -/* main_module.c - * - * Copyright (C) 2017 Sony Mobile Communications Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2, as - * published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. 
- */ -#include -#include -#include - -extern int bd7602_dev_init(void); -extern void bd7602_dev_exit(void); -extern int cxd224x_dev_init(void); -extern void cxd224x_dev_exit(void); - -static int __init nfc_carillon_init(void) -{ - int ret; - - pr_info("%s\n", __func__); - - ret = bd7602_dev_init(); - if (ret) - goto exit; - - ret = cxd224x_dev_init(); - if (ret) - goto exit_del_bd7602_driver; - - return 0; - -exit_del_bd7602_driver: - bd7602_dev_exit(); - -exit: - return ret; -} -module_init(nfc_carillon_init); - -static void __exit nfc_carillon_exit(void) -{ - pr_info("%s\n", __func__); - - cxd224x_dev_exit(); - bd7602_dev_exit(); -} -module_exit(nfc_carillon_exit); - -MODULE_AUTHOR("Sony Mobile Communications Inc."); -MODULE_DESCRIPTION("NFC Carillon's component driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c index 4d2a72a537d4..2a40d0efdff5 100644 --- a/drivers/misc/cb710/sgbuf2.c +++ b/drivers/misc/cb710/sgbuf2.c @@ -50,7 +50,7 @@ static inline bool needs_unaligned_copy(const void *ptr) #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS return false; #else - return ((uintptr_t)ptr & 3) != 0; + return ((ptr - NULL) & 3) != 0; #endif } diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index 22c1f06728a9..ff63f05edc76 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c @@ -381,4 +381,3 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs"); MODULE_AUTHOR("Anatolij Gustschin "); MODULE_ALIAS("spi:93xx46"); -MODULE_ALIAS("spi:eeprom-93xx46"); diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c index 5687046c0b1a..ea81ccda4ee5 100644 --- a/drivers/misc/hdcp.c +++ b/drivers/misc/hdcp.c @@ -2462,10 +2462,6 @@ int hdcp1_validate_receiver_ids(struct hdcp_srm_device_id_t *device_ids, int i = 0; struct qseecom_handle *hdcp1_srmhandle; - /* do not proceed further if no device connected */ - if (device_id_cnt == 0) - goto 
end; - /* If client has not been registered return */ if (!hdcp1_supported || !hdcp1_handle) return -EINVAL; diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c index 706decef68a0..6b3bf9ab051d 100644 --- a/drivers/misc/ibmasm/module.c +++ b/drivers/misc/ibmasm/module.c @@ -123,7 +123,7 @@ static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id) result = ibmasm_init_remote_input_dev(sp); if (result) { dev_err(sp->dev, "Failed to initialize remote queue\n"); - goto error_init_remote; + goto error_send_message; } result = ibmasm_send_driver_vpd(sp); @@ -143,9 +143,8 @@ static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0; error_send_message: - ibmasm_free_remote_input_dev(sp); -error_init_remote: disable_sp_interrupts(sp->base_address); + ibmasm_free_remote_input_dev(sp); free_irq(sp->irq, (void *)sp); error_request_irq: iounmap(sp->base_address); diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index ab2184003c29..fc8cb855c6e6 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -105,20 +105,19 @@ #include #include -#define v1printk(a...) do { \ - if (verbose) \ - printk(KERN_INFO a); \ -} while (0) -#define v2printk(a...) do { \ - if (verbose > 1) { \ - printk(KERN_INFO a); \ - } \ - touch_nmi_watchdog(); \ -} while (0) -#define eprintk(a...) do { \ - printk(KERN_ERR a); \ - WARN_ON(1); \ -} while (0) +#define v1printk(a...) do { \ + if (verbose) \ + printk(KERN_INFO a); \ + } while (0) +#define v2printk(a...) do { \ + if (verbose > 1) \ + printk(KERN_INFO a); \ + touch_nmi_watchdog(); \ + } while (0) +#define eprintk(a...) 
do { \ + printk(KERN_ERR a); \ + WARN_ON(1); \ + } while (0) #define MAX_CONFIG_LEN 40 static struct kgdb_io kgdbts_io_ops; diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c index 645d26536114..626fdcaf2510 100644 --- a/drivers/misc/lattice-ecp3-config.c +++ b/drivers/misc/lattice-ecp3-config.c @@ -81,12 +81,12 @@ static void firmware_load(const struct firmware *fw, void *context) if (fw == NULL) { dev_err(&spi->dev, "Cannot load firmware, aborting\n"); - goto out; + return; } if (fw->size == 0) { dev_err(&spi->dev, "Error: Firmware size is 0!\n"); - goto out; + return; } /* Fill dummy data (24 stuffing bits for commands) */ @@ -108,7 +108,7 @@ static void firmware_load(const struct firmware *fw, void *context) dev_err(&spi->dev, "Error: No supported FPGA detected (JEDEC_ID=%08x)!\n", jedec_id); - goto out; + return; } dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name); @@ -121,7 +121,7 @@ static void firmware_load(const struct firmware *fw, void *context) buffer = kzalloc(fw->size + 8, GFP_KERNEL); if (!buffer) { dev_err(&spi->dev, "Error: Can't allocate memory!\n"); - goto out; + return; } /* @@ -160,7 +160,7 @@ static void firmware_load(const struct firmware *fw, void *context) "Error: Timeout waiting for FPGA to clear (status=%08x)!\n", status); kfree(buffer); - goto out; + return; } dev_info(&spi->dev, "Configuring the FPGA...\n"); @@ -186,7 +186,7 @@ static void firmware_load(const struct firmware *fw, void *context) release_firmware(fw); kfree(buffer); -out: + complete(&data->fw_loaded); } diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index 205dc5d40ce6..fb8705fc3aca 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -220,7 +220,7 @@ static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000}; static int lis3_3dlh_rates[4] = {50, 100, 400, 1000}; /* ODR is Output Data Rate */ -static int lis3lv02d_get_odr_index(struct 
lis3lv02d *lis3) +static int lis3lv02d_get_odr(struct lis3lv02d *lis3) { u8 ctrl; int shift; @@ -228,23 +228,15 @@ static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3) lis3->read(lis3, CTRL_REG1, &ctrl); ctrl &= lis3->odr_mask; shift = ffs(lis3->odr_mask) - 1; - return (ctrl >> shift); + return lis3->odrs[(ctrl >> shift)]; } static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3) { - int odr_idx = lis3lv02d_get_odr_index(lis3); - int div = lis3->odrs[odr_idx]; + int div = lis3lv02d_get_odr(lis3); - if (div == 0) { - if (odr_idx == 0) { - /* Power-down mode, not sampling no need to sleep */ - return 0; - } - - dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx); + if (WARN_ONCE(div == 0, "device returned spurious data")) return -ENXIO; - } /* LIS3 power on delay is quite long */ msleep(lis3->pwron_delay / div); @@ -827,12 +819,9 @@ static ssize_t lis3lv02d_rate_show(struct device *dev, struct device_attribute *attr, char *buf) { struct lis3lv02d *lis3 = dev_get_drvdata(dev); - int odr_idx; lis3lv02d_sysfs_poweron(lis3); - - odr_idx = lis3lv02d_get_odr_index(lis3); - return sprintf(buf, "%d\n", lis3->odrs[odr_idx]); + return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3)); } static ssize_t lis3lv02d_rate_set(struct device *dev, diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h index 0ef759671b54..c439c827eea8 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.h +++ b/drivers/misc/lis3lv02d/lis3lv02d.h @@ -284,7 +284,6 @@ struct lis3lv02d { int regs_size; u8 *reg_cache; bool regs_stored; - bool init_required; u8 odr_mask; /* ODR bit mask */ u8 whoami; /* indicates measurement precision */ s16 (*read_data) (struct lis3lv02d *lis3, int reg); diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 85bf730c7c91..d1df797c7568 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -220,9 +220,6 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, return 
ret; } - pm_runtime_mark_last_busy(dev->dev); - pm_request_autosuspend(dev->dev); - list_move_tail(&cb->list, &cl->rd_pending); return 0; diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 60126fc1ff46..c25eeeaac4e1 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -1,7 +1,7 @@ /* * QTI Secure Execution Environment Communicator (QSEECOM) driver * - * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -3552,59 +3552,52 @@ static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp) int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req, struct qseecom_send_modfd_listener_resp *lstnr_resp, - struct qseecom_dev_handle *data, int i, size_t size) -{ - char *curr_field = NULL; - char *temp_field = NULL; - int j = 0; + struct qseecom_dev_handle *data, int i) { if ((data->type != QSEECOM_LISTENER_SERVICE) && (req->ifd_data[i].fd > 0)) { - if ((req->cmd_req_len < size) || + if ((req->cmd_req_len < sizeof(uint32_t)) || (req->ifd_data[i].cmd_buf_offset > - req->cmd_req_len - size)) { + req->cmd_req_len - sizeof(uint32_t))) { pr_err("Invalid offset (req len) 0x%x\n", req->ifd_data[i].cmd_buf_offset); return -EINVAL; } - - curr_field = (char *) (req->cmd_req_buf + - req->ifd_data[i].cmd_buf_offset); - for (j = 0; j < MAX_ION_FD; j++) { - if ((req->ifd_data[j].fd > 0) && i != j) { - temp_field = (char *) (req->cmd_req_buf + - req->ifd_data[j].cmd_buf_offset); - if (temp_field >= curr_field && temp_field < - (curr_field + size)) { - pr_err("Invalid field offset 0x%x\n", - req->ifd_data[i].cmd_buf_offset); - return -EINVAL; - } - } - } } else if ((data->type == QSEECOM_LISTENER_SERVICE) && (lstnr_resp->ifd_data[i].fd > 0)) { - if ((lstnr_resp->resp_len < size) || + if 
((lstnr_resp->resp_len < sizeof(uint32_t)) || (lstnr_resp->ifd_data[i].cmd_buf_offset > - lstnr_resp->resp_len - size)) { + lstnr_resp->resp_len - sizeof(uint32_t))) { pr_err("Invalid offset (lstnr resp len) 0x%x\n", lstnr_resp->ifd_data[i].cmd_buf_offset); return -EINVAL; } + } + return 0; +} + +static int __boundary_checks_offset_64(struct qseecom_send_modfd_cmd_req *req, + struct qseecom_send_modfd_listener_resp *lstnr_resp, + struct qseecom_dev_handle *data, int i) +{ - curr_field = (char *) (lstnr_resp->resp_buf_ptr + + if ((data->type != QSEECOM_LISTENER_SERVICE) && + (req->ifd_data[i].fd > 0)) { + if ((req->cmd_req_len < sizeof(uint64_t)) || + (req->ifd_data[i].cmd_buf_offset > + req->cmd_req_len - sizeof(uint64_t))) { + pr_err("Invalid offset (req len) 0x%x\n", + req->ifd_data[i].cmd_buf_offset); + return -EINVAL; + } + } else if ((data->type == QSEECOM_LISTENER_SERVICE) && + (lstnr_resp->ifd_data[i].fd > 0)) { + if ((lstnr_resp->resp_len < sizeof(uint64_t)) || + (lstnr_resp->ifd_data[i].cmd_buf_offset > + lstnr_resp->resp_len - sizeof(uint64_t))) { + pr_err("Invalid offset (lstnr resp len) 0x%x\n", lstnr_resp->ifd_data[i].cmd_buf_offset); - for (j = 0; j < MAX_ION_FD; j++) { - if ((lstnr_resp->ifd_data[j].fd > 0) && i != j) { - temp_field = (char *) lstnr_resp->resp_buf_ptr + - lstnr_resp->ifd_data[j].cmd_buf_offset; - if (temp_field >= curr_field && temp_field < - (curr_field + size)) { - pr_err("Invalid lstnr field offset 0x%x\n", - lstnr_resp->ifd_data[i].cmd_buf_offset); - return -EINVAL; - } - } + return -EINVAL; } } return 0; @@ -3688,10 +3681,8 @@ static int __qseecom_update_cmd_buf(void *msg, bool cleanup, if (sg_ptr->nents == 1) { uint32_t *update; - if (__boundary_checks_offset(req, lstnr_resp, data, i, - sizeof(uint32_t))) + if (__boundary_checks_offset(req, lstnr_resp, data, i)) goto err; - if ((data->type == QSEECOM_CLIENT_APP && (data->client.app_arch == ELFCLASS32 || data->client.app_arch == ELFCLASS64)) || @@ -3722,10 +3713,30 @@ static int 
__qseecom_update_cmd_buf(void *msg, bool cleanup, struct qseecom_sg_entry *update; int j = 0; - if (__boundary_checks_offset(req, lstnr_resp, data, i, - (SG_ENTRY_SZ * sg_ptr->nents))) - goto err; + if ((data->type != QSEECOM_LISTENER_SERVICE) && + (req->ifd_data[i].fd > 0)) { + if ((req->cmd_req_len < + SG_ENTRY_SZ * sg_ptr->nents) || + (req->ifd_data[i].cmd_buf_offset > + (req->cmd_req_len - + SG_ENTRY_SZ * sg_ptr->nents))) { + pr_err("Invalid offset = 0x%x\n", + req->ifd_data[i].cmd_buf_offset); + goto err; + } + + } else if ((data->type == QSEECOM_LISTENER_SERVICE) && + (lstnr_resp->ifd_data[i].fd > 0)) { + + if ((lstnr_resp->resp_len < + SG_ENTRY_SZ * sg_ptr->nents) || + (lstnr_resp->ifd_data[i].cmd_buf_offset > + (lstnr_resp->resp_len - + SG_ENTRY_SZ * sg_ptr->nents))) { + goto err; + } + } if ((data->type == QSEECOM_CLIENT_APP && (data->client.app_arch == ELFCLASS32 || data->client.app_arch == ELFCLASS64)) || @@ -3951,10 +3962,9 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup, if (sg_ptr->nents == 1) { uint64_t *update_64bit; - if (__boundary_checks_offset(req, lstnr_resp, data, i, - sizeof(uint64_t))) + if (__boundary_checks_offset_64(req, lstnr_resp, + data, i)) goto err; - /* 64bit app uses 64bit address */ update_64bit = (uint64_t *) field; *update_64bit = cleanup ? 
0 : @@ -3964,9 +3974,30 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup, struct qseecom_sg_entry_64bit *update_64bit; int j = 0; - if (__boundary_checks_offset(req, lstnr_resp, data, i, - (SG_ENTRY_SZ_64BIT * sg_ptr->nents))) - goto err; + if ((data->type != QSEECOM_LISTENER_SERVICE) && + (req->ifd_data[i].fd > 0)) { + + if ((req->cmd_req_len < + SG_ENTRY_SZ_64BIT * sg_ptr->nents) || + (req->ifd_data[i].cmd_buf_offset > + (req->cmd_req_len - + SG_ENTRY_SZ_64BIT * sg_ptr->nents))) { + pr_err("Invalid offset = 0x%x\n", + req->ifd_data[i].cmd_buf_offset); + goto err; + } + + } else if ((data->type == QSEECOM_LISTENER_SERVICE) && + (lstnr_resp->ifd_data[i].fd > 0)) { + + if ((lstnr_resp->resp_len < + SG_ENTRY_SZ_64BIT * sg_ptr->nents) || + (lstnr_resp->ifd_data[i].cmd_buf_offset > + (lstnr_resp->resp_len - + SG_ENTRY_SZ_64BIT * sg_ptr->nents))) { + goto err; + } + } /* 64bit app uses 64bit address */ update_64bit = (struct qseecom_sg_entry_64bit *)field; for (j = 0; j < sg_ptr->nents; j++) { diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c index 0bdfa90ea6cd..b9da2c6cc981 100644 --- a/drivers/misc/vmw_vmci/vmci_context.c +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -750,7 +750,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context, return VMCI_ERROR_MORE_DATA; } - dbells = kzalloc(data_size, GFP_ATOMIC); + dbells = kmalloc(data_size, GFP_ATOMIC); if (!dbells) return VMCI_ERROR_NO_MEM; diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c index c372f6a59bf7..305a3449e946 100644 --- a/drivers/misc/vmw_vmci/vmci_doorbell.c +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c @@ -334,7 +334,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle) bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn) { int result; - struct vmci_notify_bm_set_msg bitmap_set_msg = { }; + struct vmci_notify_bm_set_msg bitmap_set_msg; bitmap_set_msg.hdr.dst = 
vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_SET_NOTIFY_BITMAP); diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index 9ac3659a5537..189b32519748 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -172,7 +172,7 @@ static int vmci_check_host_caps(struct pci_dev *pdev) VMCI_UTIL_NUM_RESOURCES * sizeof(u32); struct vmci_datagram *check_msg; - check_msg = kzalloc(msg_size, GFP_KERNEL); + check_msg = kmalloc(msg_size, GFP_KERNEL); if (!check_msg) { dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__); return -ENOMEM; diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index 11325bc70aef..e57340e980c4 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -639,9 +639,6 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size) queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page); - if (queue_size + queue_page_size > KMALLOC_MAX_SIZE) - return NULL; - queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL); if (queue) { queue->q_header = NULL; @@ -735,7 +732,7 @@ static void qp_release_pages(struct page **pages, for (i = 0; i < num_pages; i++) { if (dirty) - set_page_dirty_lock(pages[i]); + set_page_dirty(pages[i]); page_cache_release(pages[i]); pages[i] = NULL; @@ -2344,8 +2341,7 @@ int vmci_qp_broker_map(struct vmci_handle handle, is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; result = VMCI_SUCCESS; - if (context_id != VMCI_HOST_CONTEXT_ID && - !QPBROKERSTATE_HAS_MEM(entry)) { + if (context_id != VMCI_HOST_CONTEXT_ID) { struct vmci_qp_page_store page_store; page_store.pages = guest_mem; @@ -2455,8 +2451,7 @@ int vmci_qp_broker_unmap(struct vmci_handle handle, is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; - if (context_id != VMCI_HOST_CONTEXT_ID && - QPBROKERSTATE_HAS_MEM(entry)) { + if (context_id != VMCI_HOST_CONTEXT_ID) { qp_acquire_queue_mutex(entry->produce_q); result = 
qp_save_headers(entry); if (result < VMCI_SUCCESS) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 1cc5924ac84b..b1678ec5e439 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -416,6 +416,10 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) /* EXT_CSD value is in units of 10ms, but we store in ms */ card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME]; + /* Some eMMC set the value too low so set a minimum */ + if (card->ext_csd.part_time && + card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME) + card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME; /* Sleep / awake timeout in 100ns units */ if (sa_shift > 0 && sa_shift <= 0x17) @@ -669,17 +673,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) card->ext_csd.cache_flush_policy = 0; } - /* - * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined - * when accessing a specific field", so use it here if there is no - * PARTITION_SWITCH_TIME. 
- */ - if (!card->ext_csd.part_time) - card->ext_csd.part_time = card->ext_csd.generic_cmd6_time; - /* Some eMMC set the value too low so set a minimum */ - if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME) - card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME; - /* eMMC v5 or later */ if (card->ext_csd.rev >= 7) { memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION], diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index cd1c632e3240..d7b0c7f28a9a 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -151,9 +151,6 @@ static int mmc_decode_csd(struct mmc_card *card) csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1; csd->erase_size <<= csd->write_blkbits - 9; } - - if (UNSTUFF_BITS(resp, 13, 1)) - mmc_card_set_readonly(card); break; case 1: /* @@ -188,9 +185,6 @@ static int mmc_decode_csd(struct mmc_card *card) csd->write_blkbits = 9; csd->write_partial = 0; csd->erase_size = 1; - - if (UNSTUFF_BITS(resp, 13, 1)) - mmc_card_set_readonly(card); break; default: pr_err("%s: unrecognised CSD structure version %d\n", diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index aeae5a2ccd9f..0ec41910d7b9 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -24,8 +24,6 @@ #include "sdio_cis.h" #include "sdio_ops.h" -#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */ - static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, const unsigned char *buf, unsigned size) { @@ -267,8 +265,6 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) do { unsigned char tpl_code, tpl_link; - unsigned long timeout = jiffies + - msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS); ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code); if (ret) @@ -329,8 +325,6 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) prev = &this->next; if (ret == -ENOENT) { - if (time_after(jiffies, timeout)) - break; /* warn about unknown tuples */ pr_warn_ratelimited("%s: queuing 
unknown" " CIS tuple 0x%02x (%u bytes)\n", diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 2f1f4f3959fb..e0d8e1958ae4 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -377,7 +377,7 @@ config MMC_OMAP_HS config MMC_WBSD tristate "Winbond W83L51xD SD/MMC Card Interface support" - depends on ISA_DMA_API && !M68K + depends on ISA_DMA_API help This selects the Winbond(R) W83L51xD Secure digital and Multimedia card Interface. diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index 4ebd9c8e5a47..3a7e835a0033 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -442,18 +442,6 @@ static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates) } } - /* - * If there is no cadiates value, then it needs to return -EIO. - * If there are candiates values and don't find bset clk sample value, - * then use a first candiates clock sample value. - */ - for (i = 0; i < iter; i++) { - __c = ror8(candiates, i); - if ((__c & 0x1) == 0x1) { - loc = i; - goto out; - } - } out: return loc; } @@ -484,8 +472,6 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode) priv->tuned_sample = found; } else { ret = -EIO; - dev_warn(&mmc->class_dev, - "There is no candiates value about clksmpl!\n"); } return ret; diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 4b5cca022642..581f5d0271f4 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -380,7 +380,7 @@ static void dw_mci_start_command(struct dw_mci *host, static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) { - struct mmc_command *stop = &host->stop_abort; + struct mmc_command *stop = data->stop ? 
data->stop : &host->stop_abort; dw_mci_start_command(host, stop, host->stop_cmdr); } @@ -701,7 +701,6 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host, int ret = 0; /* Set external dma config: burst size, burst width */ - memset(&cfg, 0, sizeof(cfg)); cfg.dst_addr = host->phy_regs + fifo_offset; cfg.src_addr = cfg.dst_addr; cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; @@ -1203,7 +1202,10 @@ static void __dw_mci_start_request(struct dw_mci *host, spin_unlock_irqrestore(&host->irq_lock, irqflags); } - host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); + if (mrq->stop) + host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); + else + host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); } static void dw_mci_start_request(struct dw_mci *host, @@ -1742,35 +1744,8 @@ static void dw_mci_tasklet_func(unsigned long priv) } if (cmd->data && err) { - /* - * During UHS tuning sequence, sending the stop - * command after the response CRC error would - * throw the system into a confused state - * causing all future tuning phases to report - * failure. - * - * In such case controller will move into a data - * transfer state after a response error or - * response CRC error. Let's let that finish - * before trying to send a stop, so we'll go to - * STATE_SENDING_DATA. - * - * Although letting the data transfer take place - * will waste a bit of time (we already know - * the command was bad), it can't cause any - * errors since it's possible it would have - * taken place anyway if this tasklet got - * delayed. Allowing the transfer to take place - * avoids races and keeps things simple. 
- */ - if (err != -ETIMEDOUT && - host->dir_status == DW_MCI_RECV_STATUS) { - state = STATE_SENDING_DATA; - continue; - } - - send_stop_abort(host, data); dw_mci_stop_dma(host); + send_stop_abort(host, data); state = STATE_SENDING_STOP; break; } @@ -1794,10 +1769,11 @@ static void dw_mci_tasklet_func(unsigned long priv) */ if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) { - if (!(host->data_status & (SDMMC_INT_DRTO | + dw_mci_stop_dma(host); + if (data->stop || + !(host->data_status & (SDMMC_INT_DRTO | SDMMC_INT_EBE))) send_stop_abort(host, data); - dw_mci_stop_dma(host); state = STATE_DATA_ERROR; break; } @@ -1831,10 +1807,11 @@ static void dw_mci_tasklet_func(unsigned long priv) */ if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) { - if (!(host->data_status & (SDMMC_INT_DRTO | + dw_mci_stop_dma(host); + if (data->stop || + !(host->data_status & (SDMMC_INT_DRTO | SDMMC_INT_EBE))) send_stop_abort(host, data); - dw_mci_stop_dma(host); state = STATE_DATA_ERROR; break; } @@ -1909,7 +1886,7 @@ static void dw_mci_tasklet_func(unsigned long priv) host->cmd = NULL; host->data = NULL; - if (!mrq->sbc && mrq->stop) + if (mrq->stop) dw_mci_command_complete(host, mrq->stop); else host->cmd_status = 0; diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index a5b03fb7656d..bbad309679cf 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -633,7 +633,6 @@ static int moxart_probe(struct platform_device *pdev) host->dma_chan_tx, host->dma_chan_rx); host->have_dma = true; - memset(&cfg, 0, sizeof(cfg)); cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; @@ -698,12 +697,12 @@ static int moxart_remove(struct platform_device *pdev) if (!IS_ERR(host->dma_chan_rx)) dma_release_channel(host->dma_chan_rx); mmc_remove_host(mmc); + mmc_free_host(mmc); writel(0, host->base + REG_INTERRUPT_MASK); writel(0, host->base + REG_POWER_CONTROL); writel(readl(host->base 
+ REG_CLOCK_CONTROL) | CLK_OFF, host->base + REG_CLOCK_CONTROL); - mmc_free_host(mmc); } return 0; } diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 1770c8df9d1b..5ef25463494f 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -720,13 +720,13 @@ static void msdc_track_cmd_data(struct msdc_host *host, static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq) { unsigned long flags; + bool ret; - /* - * No need check the return value of cancel_delayed_work, as only ONE - * path will go here! - */ - cancel_delayed_work(&host->req_timeout); - + ret = cancel_delayed_work(&host->req_timeout); + if (!ret) { + /* delay work already running */ + return; + } spin_lock_irqsave(&host->lock, flags); host->mrq = NULL; spin_unlock_irqrestore(&host->lock, flags); @@ -747,7 +747,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events, bool done = false; bool sbc_error; unsigned long flags; - u32 *rsp; + u32 *rsp = cmd->resp; if (mrq->sbc && cmd == mrq->cmd && (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR @@ -768,7 +768,6 @@ static bool msdc_cmd_done(struct msdc_host *host, int events, if (done) return true; - rsp = cmd->resp; sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask); @@ -943,7 +942,7 @@ static void msdc_data_xfer_next(struct msdc_host *host, static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, struct mmc_request *mrq, struct mmc_data *data) { - struct mmc_command *stop; + struct mmc_command *stop = data->stop; unsigned long flags; bool done; unsigned int check_data = events & @@ -959,7 +958,6 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, if (done) return true; - stop = data->stop; if (check_data || (stop && stop->error)) { dev_dbg(host->dev, "DMA status: 0x%8X\n", diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 687fd68fbbcd..c8b8ac66ff7e 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -651,7 
+651,7 @@ static int mxs_mmc_probe(struct platform_device *pdev) ret = mmc_of_parse(mmc); if (ret) - goto out_free_dma; + goto out_clk_disable; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 10ec88833889..93137483ecde 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -553,22 +553,9 @@ static int sd_write_long_data(struct realtek_pci_sdmmc *host, return 0; } -static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) -{ - rtsx_pci_write_register(host->pcr, SD_CFG1, - SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128); -} - -static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host) -{ - rtsx_pci_write_register(host->pcr, SD_CFG1, - SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0); -} - static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) { struct mmc_data *data = mrq->data; - int err; if (host->sg_count < 0) { data->error = host->sg_count; @@ -577,19 +564,22 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) return data->error; } - if (data->flags & MMC_DATA_READ) { - if (host->initial_mode) - sd_disable_initial_mode(host); - - err = sd_read_long_data(host, mrq); + if (data->flags & MMC_DATA_READ) + return sd_read_long_data(host, mrq); - if (host->initial_mode) - sd_enable_initial_mode(host); + return sd_write_long_data(host, mrq); +} - return err; - } +static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) +{ + rtsx_pci_write_register(host->pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128); +} - return sd_write_long_data(host, mrq); +static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host) +{ + rtsx_pci_write_register(host->pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0); } static void sd_normal_rw(struct realtek_pci_sdmmc *host, diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 
b95d911ef497..8d838779fd1b 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -1240,10 +1240,9 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev) struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; - int dead; + int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); pm_runtime_get_sync(&pdev->dev); - dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); pm_runtime_disable(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index a7dd9378d7a3..68007a96de39 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1298,10 +1298,6 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host) u16 preset = 0; switch (host->timing) { - case MMC_TIMING_MMC_HS: - case MMC_TIMING_SD_HS: - preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED); - break; case MMC_TIMING_UHS_SDR12: preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); break; @@ -1488,12 +1484,6 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode, break; case MMC_VDD_32_33: case MMC_VDD_33_34: - /* - * 3.4 ~ 3.6V are valid only for those platforms where it's - * known that the voltage range is supported by hardware. 
- */ - case MMC_VDD_34_35: - case MMC_VDD_35_36: pwr = SDHCI_POWER_330; break; default: diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index e53f0c6ed375..c6178c099039 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -244,7 +244,6 @@ /* 60-FB reserved */ -#define SDHCI_PRESET_FOR_HIGH_SPEED 0x64 #define SDHCI_PRESET_FOR_SDR12 0x66 #define SDHCI_PRESET_FOR_SDR25 0x68 #define SDHCI_PRESET_FOR_SDR50 0x6A diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index 49798a68299e..b47122d3e8d8 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -1751,7 +1751,6 @@ static int usdhi6_probe(struct platform_device *pdev) version = usdhi6_read(host, USDHI6_VERSION); if ((version & 0xfff) != 0xa0d) { - ret = -EPERM; dev_err(dev, "Version not recognized %x\n", version); goto e_clk_off; } @@ -1809,12 +1808,10 @@ static int usdhi6_probe(struct platform_device *pdev) ret = mmc_add_host(mmc); if (ret < 0) - goto e_release_dma; + goto e_clk_off; return 0; -e_release_dma: - usdhi6_dma_release(host); e_clk_off: clk_disable_unprepare(host->clk); e_free_mmc: diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index a3472127bea3..b455e9cf95af 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -859,9 +859,6 @@ static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask) { BUG_ON(intmask == 0); - if (!host->data) - return; - if (intmask & VIA_CRDR_SDSTS_DT) host->data->error = -ETIMEDOUT; else if (intmask & (VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)) diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index 69987b9066f6..1e819f98b94f 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c @@ -579,7 +579,7 @@ static void check_vub300_port_status(struct vub300_mmc_host *vub300) GET_SYSTEM_PORT_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, 0x0000, &vub300->system_port_status, - 
sizeof(vub300->system_port_status), 1000); + sizeof(vub300->system_port_status), HZ); if (sizeof(vub300->system_port_status) == retval) new_system_port_status(vub300); } @@ -1245,7 +1245,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300, SET_INTERRUPT_PSEUDOCODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, 0x0000, - xfer_buffer, xfer_length, 1000); + xfer_buffer, xfer_length, HZ); kfree(xfer_buffer); if (retval < 0) { strncpy(vub300->vub_name, @@ -1292,7 +1292,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300, SET_TRANSFER_PSEUDOCODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, 0x0000, - xfer_buffer, xfer_length, 1000); + xfer_buffer, xfer_length, HZ); kfree(xfer_buffer); if (retval < 0) { strncpy(vub300->vub_name, @@ -1998,7 +1998,7 @@ static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8], usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), SET_CLOCK_SPEED, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0x00, 0x00, buf, buf_array_size, 1000); + 0x00, 0x00, buf, buf_array_size, HZ); if (retval != 8) { dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED" " %dkHz failed with retval=%d\n", kHzClock, retval); @@ -2020,14 +2020,14 @@ static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), SET_SD_POWER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0x0000, 0x0000, NULL, 0, 1000); + 0x0000, 0x0000, NULL, 0, HZ); /* must wait for the VUB300 u-proc to boot up */ msleep(600); } else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) { usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), SET_SD_POWER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0x0001, 0x0000, NULL, 0, 1000); + 0x0001, 0x0000, NULL, 0, HZ); msleep(600); vub300->card_powered = 1; } else if (ios->power_mode == MMC_POWER_ON) { @@ -2290,14 +2290,14 @@ static int vub300_probe(struct 
usb_interface *interface, GET_HC_INF0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, 0x0000, &vub300->hc_info, - sizeof(vub300->hc_info), 1000); + sizeof(vub300->hc_info), HZ); if (retval < 0) goto error5; retval = - usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), + usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), SET_ROM_WAIT_STATES, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - firmware_rom_wait_states, 0x0000, NULL, 0, 1000); + firmware_rom_wait_states, 0x0000, NULL, 0, HZ); if (retval < 0) goto error5; dev_info(&vub300->udev->dev, @@ -2312,7 +2312,7 @@ static int vub300_probe(struct usb_interface *interface, GET_SYSTEM_PORT_STATUS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x0000, 0x0000, &vub300->system_port_status, - sizeof(vub300->system_port_status), 1000); + sizeof(vub300->system_port_status), HZ); if (retval < 0) { goto error4; } else if (sizeof(vub300->system_port_status) == retval) { diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c index ab5b023ad279..ffbc9b304beb 100644 --- a/drivers/mtd/cmdlinepart.c +++ b/drivers/mtd/cmdlinepart.c @@ -228,7 +228,7 @@ static int mtdpart_setup_real(char *s) struct cmdline_mtd_partition *this_mtd; struct mtd_partition *parts; int mtd_id_len, num_parts; - char *p, *mtd_id, *semicol, *open_parenth; + char *p, *mtd_id, *semicol; /* * Replace the first ';' by a NULL char so strrchr can work @@ -238,14 +238,6 @@ static int mtdpart_setup_real(char *s) if (semicol) *semicol = '\0'; - /* - * make sure that part-names with ":" will not be handled as - * part of the mtd-id with an ":" - */ - open_parenth = strchr(s, '('); - if (open_parenth) - *open_parenth = '\0'; - mtd_id = s; /* @@ -255,10 +247,6 @@ static int mtdpart_setup_real(char *s) */ p = strrchr(s, ':'); - /* Restore the '(' now. */ - if (open_parenth) - *open_parenth = '('; - /* Restore the ';' now. 
*/ if (semicol) *semicol = ';'; diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 0a2832782807..ce87d9506f6a 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -616,12 +616,16 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) case MEMGETINFO: case MEMREADOOB: case MEMREADOOB64: + case MEMLOCK: + case MEMUNLOCK: case MEMISLOCKED: case MEMGETOOBSEL: case MEMGETBADBLOCK: + case MEMSETBADBLOCK: case OTPSELECT: case OTPGETREGIONCOUNT: case OTPGETREGIONINFO: + case OTPLOCK: case ECCGETLAYOUT: case ECCGETSTATS: case MTDFILEMODE: @@ -632,13 +636,9 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) /* "dangerous" commands */ case MEMERASE: case MEMERASE64: - case MEMLOCK: - case MEMUNLOCK: - case MEMSETBADBLOCK: case MEMWRITEOOB: case MEMWRITEOOB64: case MEMWRITE: - case OTPLOCK: if (!(file->f_mode & FMODE_WRITE)) return -EPERM; break; diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c index ab9b2a913411..8278158715c1 100644 --- a/drivers/mtd/nand/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/brcmnand/brcmnand.c @@ -1460,7 +1460,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, mtd->oobsize / trans, host->hwcfg.sector_size_1k); - if (ret != -EBADMSG) { + if (!ret) { *err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR) | ((u64)(brcmnand_read_reg(ctrl, diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index 10c063d73ee3..9de78d2a2eb1 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c @@ -672,7 +672,7 @@ static int cafe_nand_probe(struct pci_dev *pdev, "CAFE NAND", mtd); if (err) { dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq); - goto out_free_rs; + goto out_ior; } /* Disable master reset, enable NAND clock */ @@ -781,8 +781,6 @@ static int cafe_nand_probe(struct pci_dev *pdev, /* Disable NAND IRQ in global IRQ mask register */ cafe_writel(cafe, ~1 & cafe_readl(cafe, 
GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK); free_irq(pdev->irq, mtd); - out_free_rs: - free_rs(cafe->rs); out_ior: pci_iounmap(pdev, cafe->mmio); out_free_mtd: diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c index 557fcf1c21fe..0802158a3f75 100644 --- a/drivers/mtd/nand/diskonchip.c +++ b/drivers/mtd/nand/diskonchip.c @@ -1608,10 +1608,13 @@ static int __init doc_probe(unsigned long physadr) numchips = doc2001_init(mtd); if ((ret = nand_scan(mtd, numchips)) || (ret = doc->late_init(mtd))) { - /* DBB note: i believe nand_cleanup is necessary here, as + /* DBB note: i believe nand_release is necessary here, as buffers may have been allocated in nand_base. Check with Thomas. FIX ME! */ - nand_cleanup(nand); + /* nand_release will call mtd_device_unregister, but we + haven't yet added it. This is handled without incident by + mtd_device_unregister, as far as I can tell. */ + nand_release(mtd); kfree(mtd); goto fail; } diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index 7b4278d50b45..ee83749fb1d3 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c @@ -165,7 +165,7 @@ static int __init orion_nand_probe(struct platform_device *pdev) ret = mtd_device_parse_register(mtd, NULL, &ppdata, board->parts, board->nr_parts); if (ret) { - nand_cleanup(nc); + nand_release(mtd); goto no_dev; } diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c index 8d289a882ca7..83cf021b9651 100644 --- a/drivers/mtd/nand/pasemi_nand.c +++ b/drivers/mtd/nand/pasemi_nand.c @@ -167,7 +167,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev) if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) { printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n"); err = -ENODEV; - goto out_cleanup_nand; + goto out_lpc; } printk(KERN_INFO "PA Semi NAND flash at %08llx, control at I/O %x\n", @@ -175,8 +175,6 @@ static int pasemi_nand_probe(struct platform_device *ofdev) return 0; - out_cleanup_nand: - 
nand_cleanup(chip); out_lpc: release_region(lpcctl, 4); out_ior: diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c index 89c4a19b1740..65b9dbbe6d6a 100644 --- a/drivers/mtd/nand/plat_nand.c +++ b/drivers/mtd/nand/plat_nand.c @@ -102,7 +102,7 @@ static int plat_nand_probe(struct platform_device *pdev) if (!err) return err; - nand_cleanup(&data->chip); + nand_release(&data->mtd); out: if (pdata->ctrl.remove) pdata->ctrl.remove(pdev); diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c index 42b2a8d90d33..082b6009736d 100644 --- a/drivers/mtd/nand/sharpsl.c +++ b/drivers/mtd/nand/sharpsl.c @@ -189,7 +189,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev) return 0; err_add: - nand_cleanup(this); + nand_release(&sharpsl->mtd); err_scan: iounmap(sharpsl->io); diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c index 8775111837f4..b94f53427f0f 100644 --- a/drivers/mtd/nand/socrates_nand.c +++ b/drivers/mtd/nand/socrates_nand.c @@ -204,7 +204,7 @@ static int socrates_nand_probe(struct platform_device *ofdev) if (!res) return res; - nand_cleanup(nand_chip); + nand_release(mtd); out: iounmap(host->io_base); diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c index d8c6c09917ad..befddf0776e4 100644 --- a/drivers/mtd/nand/tmio_nand.c +++ b/drivers/mtd/nand/tmio_nand.c @@ -445,7 +445,7 @@ static int tmio_probe(struct platform_device *dev) if (!retval) return retval; - nand_cleanup(nand_chip); + nand_release(mtd); err_irq: tmio_hw_stop(dev, tmio); diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 841a5de58c7c..7f2a032c354c 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -324,8 +324,6 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr) break; } - dev->base_addr = ioaddr; - /* Reserve any actual interrupt. 
*/ if (dev->irq) { retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev); @@ -333,6 +331,8 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr) goto err_out; } + dev->base_addr = ioaddr; + lp = netdev_priv(dev); spin_lock_init(&lp->lock); diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 1c9fff038569..239de38fbd6a 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -115,9 +115,6 @@ static int com20020pci_probe(struct pci_dev *pdev, return -ENOMEM; ci = (struct com20020_pci_card_info *)id->driver_data; - if (!ci) - return -EINVAL; - priv->ci = ci; mm = &ci->misc_map; diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 38b488a729f5..22ebdf4d8cc4 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -939,8 +939,8 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) if (port->aggregator && port->aggregator->is_active && !__port_is_enabled(port)) { + __enable_port(port); - *update_slave_arr = true; } } break; @@ -1686,7 +1686,6 @@ static void ad_agg_selection_logic(struct aggregator *agg, port = port->next_port_in_aggregator) { __enable_port(port); } - *update_slave_arr = true; } } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index bbf18d6e4f67..5811235a64c8 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -785,14 +785,14 @@ static bool bond_should_notify_peers(struct bonding *bond) slave = rcu_dereference(bond->curr_active_slave); rcu_read_unlock(); + netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", + slave ? slave->dev->name : "NULL"); + if (!slave || !bond->send_peer_notif || !netif_carrier_ok(bond->dev) || test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) return false; - netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", - slave ? 
slave->dev->name : "NULL"); - return true; } @@ -1225,39 +1225,7 @@ static void bond_upper_dev_unlink(struct net_device *bond_dev, rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL); } -static void slave_kobj_release(struct kobject *kobj) -{ - struct slave *slave = to_slave(kobj); - struct bonding *bond = bond_get_bond_by_slave(slave); - - cancel_delayed_work_sync(&slave->notify_work); - if (BOND_MODE(bond) == BOND_MODE_8023AD) - kfree(SLAVE_AD_INFO(slave)); - - kfree(slave); -} - -static struct kobj_type slave_ktype = { - .release = slave_kobj_release, -#ifdef CONFIG_SYSFS - .sysfs_ops = &slave_sysfs_ops, -#endif -}; - -static int bond_kobj_init(struct slave *slave) -{ - int err; - - err = kobject_init_and_add(&slave->kobj, &slave_ktype, - &(slave->dev->dev.kobj), "bonding_slave"); - if (err) - kobject_put(&slave->kobj); - - return err; -} - -static struct slave *bond_alloc_slave(struct bonding *bond, - struct net_device *slave_dev) +static struct slave *bond_alloc_slave(struct bonding *bond) { struct slave *slave = NULL; @@ -1265,25 +1233,30 @@ static struct slave *bond_alloc_slave(struct bonding *bond, if (!slave) return NULL; - slave->bond = bond; - slave->dev = slave_dev; - INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work); - - if (bond_kobj_init(slave)) - return NULL; - if (BOND_MODE(bond) == BOND_MODE_8023AD) { SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info), GFP_KERNEL); if (!SLAVE_AD_INFO(slave)) { - kobject_put(&slave->kobj); + kfree(slave); return NULL; } } + INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work); return slave; } +static void bond_free_slave(struct slave *slave) +{ + struct bonding *bond = bond_get_bond_by_slave(slave); + + cancel_delayed_work_sync(&slave->notify_work); + if (BOND_MODE(bond) == BOND_MODE_8023AD) + kfree(SLAVE_AD_INFO(slave)); + + kfree(slave); +} + static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info) { info->bond_mode = BOND_MODE(bond); @@ -1447,12 +1420,14 
@@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond->dev->addr_assign_type == NET_ADDR_RANDOM) bond_set_dev_addr(bond->dev, slave_dev); - new_slave = bond_alloc_slave(bond, slave_dev); + new_slave = bond_alloc_slave(bond); if (!new_slave) { res = -ENOMEM; goto err_undo_flags; } + new_slave->bond = bond; + new_slave->dev = slave_dev; /* Set the new_slave's queue_id to be zero. Queue ID mapping * is set via sysfs or module option if desired. */ @@ -1766,7 +1741,7 @@ err_restore_mtu: dev_set_mtu(slave_dev, new_slave->original_mtu); err_free: - kobject_put(&new_slave->kobj); + bond_free_slave(new_slave); err_undo_flags: /* Enslave of first slave has failed and we need to fix master's mac */ @@ -1945,7 +1920,7 @@ static int __bond_release_one(struct net_device *bond_dev, if (!netif_is_bond_master(slave_dev)) slave_dev->priv_flags &= ~IFF_BONDING; - kobject_put(&slave->kobj); + bond_free_slave(slave); return 0; } diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 1d95a83d2bae..1022e80aaf97 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1407,7 +1407,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond, mac = (u8 *)&newval->value; } - if (is_multicast_ether_addr(mac)) + if (!is_valid_ether_addr(mac)) goto err; netdev_info(bond->dev, "Setting ad_actor_system to %pM\n", mac); diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 1e1e77a40f18..3f756fa2f603 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c @@ -112,19 +112,20 @@ static ssize_t ad_partner_oper_port_state_show(struct slave *slave, char *buf) } static SLAVE_ATTR_RO(ad_partner_oper_port_state); -static const struct attribute *slave_attrs[] = { - &slave_attr_state.attr, - &slave_attr_mii_status.attr, - &slave_attr_link_failure_count.attr, - &slave_attr_perm_hwaddr.attr, - 
&slave_attr_queue_id.attr, - &slave_attr_ad_aggregator_id.attr, - &slave_attr_ad_actor_oper_port_state.attr, - &slave_attr_ad_partner_oper_port_state.attr, +static const struct slave_attribute *slave_attrs[] = { + &slave_attr_state, + &slave_attr_mii_status, + &slave_attr_link_failure_count, + &slave_attr_perm_hwaddr, + &slave_attr_queue_id, + &slave_attr_ad_aggregator_id, + &slave_attr_ad_actor_oper_port_state, + &slave_attr_ad_partner_oper_port_state, NULL }; #define to_slave_attr(_at) container_of(_at, struct slave_attribute, attr) +#define to_slave(obj) container_of(obj, struct slave, kobj) static ssize_t slave_show(struct kobject *kobj, struct attribute *attr, char *buf) @@ -135,16 +136,45 @@ static ssize_t slave_show(struct kobject *kobj, return slave_attr->show(slave, buf); } -const struct sysfs_ops slave_sysfs_ops = { +static const struct sysfs_ops slave_sysfs_ops = { .show = slave_show, }; +static struct kobj_type slave_ktype = { +#ifdef CONFIG_SYSFS + .sysfs_ops = &slave_sysfs_ops, +#endif +}; + int bond_sysfs_slave_add(struct slave *slave) { - return sysfs_create_files(&slave->kobj, slave_attrs); + const struct slave_attribute **a; + int err; + + err = kobject_init_and_add(&slave->kobj, &slave_ktype, + &(slave->dev->dev.kobj), "bonding_slave"); + if (err) { + kobject_put(&slave->kobj); + return err; + } + + for (a = slave_attrs; *a; ++a) { + err = sysfs_create_file(&slave->kobj, &((*a)->attr)); + if (err) { + kobject_put(&slave->kobj); + return err; + } + } + + return 0; } void bond_sysfs_slave_del(struct slave *slave) { - sysfs_remove_files(&slave->kobj, slave_attrs); + const struct slave_attribute **a; + + for (a = slave_attrs; *a; ++a) + sysfs_remove_file(&slave->kobj, &((*a)->attr)); + + kobject_put(&slave->kobj); } diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index 1243c2e5a86a..c2dea4916e5d 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -281,6 +281,7 @@ static int caif_xmit(struct 
sk_buff *skb, struct net_device *dev) { struct ser_device *ser; + BUG_ON(dev == NULL); ser = netdev_priv(dev); /* Send flow off once, on high water mark */ @@ -362,7 +363,6 @@ static int ldisc_open(struct tty_struct *tty) rtnl_lock(); result = register_netdevice(dev); if (result) { - tty_kref_put(tty); rtnl_unlock(); free_netdev(dev); return -ENODEV; diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index c41ab2cb272e..4ead5a18b794 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -212,6 +212,18 @@ static const struct can_bittiming_const c_can_bittiming_const = { .brp_inc = 1, }; +static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv) +{ + if (priv->device) + pm_runtime_enable(priv->device); +} + +static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv) +{ + if (priv->device) + pm_runtime_disable(priv->device); +} + static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv) { if (priv->device) @@ -1306,6 +1318,7 @@ static const struct net_device_ops c_can_netdev_ops = { int register_c_can_dev(struct net_device *dev) { + struct c_can_priv *priv = netdev_priv(dev); int err; /* Deactivate pins to prevent DRA7 DCAN IP from being @@ -1315,19 +1328,28 @@ int register_c_can_dev(struct net_device *dev) */ pinctrl_pm_select_sleep_state(dev->dev.parent); + c_can_pm_runtime_enable(priv); + dev->flags |= IFF_ECHO; /* we support local echo */ dev->netdev_ops = &c_can_netdev_ops; err = register_candev(dev); - if (!err) + if (err) + c_can_pm_runtime_disable(priv); + else devm_can_led_init(dev); + return err; } EXPORT_SYMBOL_GPL(register_c_can_dev); void unregister_c_can_dev(struct net_device *dev) { + struct c_can_priv *priv = netdev_priv(dev); + unregister_candev(dev); + + c_can_pm_runtime_disable(priv); } EXPORT_SYMBOL_GPL(unregister_c_can_dev); diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index f3e0b2124a37..d065c0e2d18e 100644 
--- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -239,13 +239,12 @@ static void c_can_pci_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct c_can_priv *priv = netdev_priv(dev); - void __iomem *addr = priv->base; unregister_c_can_dev(dev); free_c_can_dev(dev); - pci_iounmap(pdev, addr); + pci_iounmap(pdev, priv->base); pci_disable_msi(pdev); pci_clear_master(pdev); pci_release_regions(pdev); diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index c6a03f565e3f..717530eac70c 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include @@ -386,7 +385,6 @@ static int c_can_plat_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); - pm_runtime_enable(priv->device); ret = register_c_can_dev(dev); if (ret) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", @@ -399,7 +397,6 @@ static int c_can_plat_probe(struct platform_device *pdev) return 0; exit_free_device: - pm_runtime_disable(priv->device); free_c_can_dev(dev); exit: dev_err(&pdev->dev, "probe failed\n"); @@ -410,10 +407,9 @@ exit: static int c_can_plat_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); - struct c_can_priv *priv = netdev_priv(dev); unregister_c_can_dev(dev); - pm_runtime_disable(priv->device); + free_c_can_dev(dev); return 0; diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 2835aeb11c9f..52110017fd40 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -525,11 +525,11 @@ static void can_restart(struct net_device *dev) } cf->can_id |= CAN_ERR_RESTARTED; + netif_rx_ni(skb); + stats->rx_packets++; stats->rx_bytes += cf->can_dlc; - netif_rx_ni(skb); - restart: netdev_dbg(dev, "restarted\n"); priv->can_stats.restarts++; @@ -987,7 +987,7 @@ static int 
can_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); struct can_ctrlmode cm = {.flags = priv->ctrlmode}; - struct can_berr_counter bec = { }; + struct can_berr_counter bec; enum can_state state = priv->state; if (priv->do_get_state) @@ -1054,7 +1054,6 @@ static void can_dellink(struct net_device *dev, struct list_head *head) static struct rtnl_link_ops can_link_ops __read_mostly = { .kind = "can", - .netns_refund = true, .maxtype = IFLA_CAN_MAX, .policy = can_policy, .setup = can_setup, diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index dcad5213eb34..6b866d0451b2 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -379,17 +379,11 @@ static int flexcan_chip_disable(struct flexcan_priv *priv) static int flexcan_chip_freeze(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; - unsigned int timeout; - u32 bitrate = priv->can.bittiming.bitrate; + unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate; u32 reg; - if (bitrate) - timeout = 1000 * 1000 * 10 / bitrate; - else - timeout = FLEXCAN_TIMEOUT_US / 10; - reg = flexcan_read(®s->mcr); - reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT; + reg |= FLEXCAN_MCR_HALT; flexcan_write(reg, ®s->mcr); while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) @@ -1104,14 +1098,10 @@ static int register_flexcandev(struct net_device *dev) if (err) goto out_chip_disable; - /* set freeze, halt */ - err = flexcan_chip_freeze(priv); - if (err) - goto out_chip_disable; - - /* activate FIFO, restrict register access */ + /* set freeze, halt and activate FIFO, restrict register access */ reg = flexcan_read(®s->mcr); - reg |= FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; + reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | + FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; flexcan_write(reg, ®s->mcr); /* Currently we only support newer versions of this core diff --git a/drivers/net/can/m_can/m_can.c 
b/drivers/net/can/m_can/m_can.c index 197c27d8f584..0bd7e7164796 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -428,6 +428,9 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) } while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) { + if (rxfs & RXFS_RFL) + netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); + m_can_read_fifo(dev, rxfs); quota--; diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index ced11ea89269..c1317889d3d8 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -703,11 +703,11 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota) cf->data[i + 1] = data_reg >> 8; } + netif_receive_skb(skb); rcv_pkts++; stats->rx_packets++; quota--; stats->rx_bytes += cf->can_dlc; - netif_receive_skb(skb); pch_fifo_thresh(priv, obj_num); obj_num++; diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c index ed40405c8f4e..9d93492ddfcc 100644 --- a/drivers/net/can/rcar_can.c +++ b/drivers/net/can/rcar_can.c @@ -858,12 +858,10 @@ static int __maybe_unused rcar_can_suspend(struct device *dev) struct rcar_can_priv *priv = netdev_priv(ndev); u16 ctlr; - if (!netif_running(ndev)) - return 0; - - netif_stop_queue(ndev); - netif_device_detach(ndev); - + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + } ctlr = readw(&priv->regs->ctlr); ctlr |= RCAR_CAN_CTLR_CANM_HALT; writew(ctlr, &priv->regs->ctlr); @@ -882,9 +880,6 @@ static int __maybe_unused rcar_can_resume(struct device *dev) u16 ctlr; int err; - if (!netif_running(ndev)) - return 0; - err = clk_enable(priv->clk); if (err) { netdev_err(ndev, "clk_enable() failed, error %d\n", err); @@ -898,9 +893,10 @@ static int __maybe_unused rcar_can_resume(struct device *dev) writew(ctlr, &priv->regs->ctlr); priv->can.state = CAN_STATE_ERROR_ACTIVE; - netif_device_attach(ndev); - netif_start_queue(ndev); - + if (netif_running(ndev)) { + netif_device_attach(ndev); + 
netif_start_queue(ndev); + } return 0; } diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c index fef5c59c0f4c..381de998d2f1 100644 --- a/drivers/net/can/sja1000/ems_pcmcia.c +++ b/drivers/net/can/sja1000/ems_pcmcia.c @@ -243,12 +243,7 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base) free_sja1000dev(dev); } - if (!card->channels) { - err = -ENODEV; - goto failure_cleanup; - } - - err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, + err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, DRV_NAME, card); if (!err) return 0; diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index e12fc5d88382..131026fbc2d7 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -736,15 +736,16 @@ static void peak_pci_remove(struct pci_dev *pdev) struct net_device *prev_dev = chan->prev_dev; dev_info(&pdev->dev, "removing device %s\n", dev->name); - /* do that only for first channel */ - if (!prev_dev && chan->pciec_card) - peak_pciec_remove(chan->pciec_card); unregister_sja1000dev(dev); free_sja1000dev(dev); dev = prev_dev; - if (!dev) + if (!dev) { + /* do that only for first channel */ + if (chan->pciec_card) + peak_pciec_remove(chan->pciec_card); break; + } priv = netdev_priv(dev); chan = priv->priv; } diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c index 9fbed88d6c82..cdc0c7433a4b 100644 --- a/drivers/net/can/softing/softing_cs.c +++ b/drivers/net/can/softing/softing_cs.c @@ -304,7 +304,7 @@ static int softingcs_probe(struct pcmcia_device *pcmcia) return 0; platform_failed: - platform_device_put(pdev); + kfree(dev); mem_failed: pcmcia_bad: pcmcia_failed: diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c index a74c779feb90..52fe50725d74 100644 --- a/drivers/net/can/softing/softing_fw.c +++ b/drivers/net/can/softing/softing_fw.c @@ -576,19 
+576,18 @@ int softing_startstop(struct net_device *dev, int up) if (ret < 0) goto failed; } - - /* enable_error_frame - * + /* enable_error_frame */ + /* * Error reporting is switched off at the moment since * the receiving of them is not yet 100% verified * This should be enabled sooner or later - */ - if (0 && error_reporting) { + * + if (error_reporting) { ret = softing_fct_cmd(card, 51, "enable_error_frame"); if (ret < 0) goto failed; } - + */ /* initialize interface */ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]); diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index fd48770ba792..7621f91a8a20 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -393,13 +393,8 @@ static int softing_netdev_open(struct net_device *ndev) /* check or determine and set bittime */ ret = open_candev(ndev); - if (ret) - return ret; - - ret = softing_startstop(ndev, 1); - if (ret < 0) - close_candev(ndev); - + if (!ret) + ret = softing_startstop(ndev, 1); return ret; } diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index f0f60e1fde66..047348033e27 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -267,8 +267,6 @@ struct ems_usb { unsigned int free_slots; /* remember number of available slots */ struct ems_cpc_msg active_params; /* active controller parameters */ - void *rxbuf[MAX_RX_URBS]; - dma_addr_t rxbuf_dma[MAX_RX_URBS]; }; static void ems_usb_read_interrupt_callback(struct urb *urb) @@ -602,7 +600,6 @@ static int ems_usb_start(struct ems_usb *dev) for (i = 0; i < MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf = NULL; - dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); @@ -613,7 +610,7 @@ static int ems_usb_start(struct ems_usb *dev) } buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, - &buf_dma); + 
&urb->transfer_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); @@ -621,8 +618,6 @@ static int ems_usb_start(struct ems_usb *dev) break; } - urb->transfer_dma = buf_dma; - usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2), buf, RX_BUFFER_SIZE, ems_usb_read_bulk_callback, dev); @@ -638,9 +633,6 @@ static int ems_usb_start(struct ems_usb *dev) break; } - dev->rxbuf[i] = buf; - dev->rxbuf_dma[i] = buf_dma; - /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } @@ -706,10 +698,6 @@ static void unlink_all_urbs(struct ems_usb *dev) usb_kill_anchored_urbs(&dev->rx_submitted); - for (i = 0; i < MAX_RX_URBS; ++i) - usb_free_coherent(dev->udev, RX_BUFFER_SIZE, - dev->rxbuf[i], dev->rxbuf_dma[i]); - usb_kill_anchored_urbs(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); @@ -1083,6 +1071,7 @@ static void ems_usb_disconnect(struct usb_interface *intf) if (dev) { unregister_netdev(dev->netdev); + free_candev(dev->netdev); unlink_all_urbs(dev); @@ -1090,8 +1079,6 @@ static void ems_usb_disconnect(struct usb_interface *intf) kfree(dev->intr_in_buffer); kfree(dev->tx_msg_buffer); - - free_candev(dev->netdev); } } diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index c1b1062bbc37..afa5b4a7a4a2 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -207,8 +207,6 @@ struct esd_usb2 { int net_count; u32 version; int rxinitdone; - void *rxbuf[MAX_RX_URBS]; - dma_addr_t rxbuf_dma[MAX_RX_URBS]; }; struct esd_usb2_net_priv { @@ -236,8 +234,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, if (id == ESD_EV_CAN_ERROR_EXT) { u8 state = msg->msg.rx.data[0]; u8 ecc = msg->msg.rx.data[1]; - u8 rxerr = msg->msg.rx.data[2]; - u8 txerr = msg->msg.rx.data[3]; + u8 txerr = msg->msg.rx.data[2]; + u8 rxerr = msg->msg.rx.data[3]; skb = alloc_can_err_skb(priv->netdev, &cf); if (skb == NULL) { @@ -558,7 +556,6 @@ static int 
esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) for (i = 0; i < MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf = NULL; - dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); @@ -570,7 +567,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) } buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, - &buf_dma); + &urb->transfer_dma); if (!buf) { dev_warn(dev->udev->dev.parent, "No memory left for USB buffer\n"); @@ -578,8 +575,6 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) goto freeurb; } - urb->transfer_dma = buf_dma; - usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), buf, RX_BUFFER_SIZE, @@ -592,12 +587,8 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev) usb_unanchor_urb(urb); usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, urb->transfer_dma); - goto freeurb; } - dev->rxbuf[i] = buf; - dev->rxbuf_dma[i] = buf_dma; - freeurb: /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); @@ -685,11 +676,6 @@ static void unlink_all_urbs(struct esd_usb2 *dev) int i, j; usb_kill_anchored_urbs(&dev->rx_submitted); - - for (i = 0; i < MAX_RX_URBS; ++i) - usb_free_coherent(dev->udev, RX_BUFFER_SIZE, - dev->rxbuf[i], dev->rxbuf_dma[i]); - for (i = 0; i < dev->net_count; i++) { priv = dev->nets[i]; if (priv) { diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 0df08c63f0fd..3e965b00bc09 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -174,8 +174,8 @@ struct gs_can { struct gs_usb { struct gs_can *canch[GS_MAX_INTF]; struct usb_anchor rx_submitted; + atomic_t active_channels; struct usb_device *udev; - u8 active_channels; }; /* 'allocate' a tx context. 
@@ -303,7 +303,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) /* device reports out of range channel id */ if (hf->channel >= GS_MAX_INTF) - goto device_detach; + goto resubmit_urb; dev = usbcan->canch[hf->channel]; @@ -388,7 +388,6 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) /* USB failure take down all interfaces */ if (rc == -ENODEV) { - device_detach: for (rc = 0; rc < GS_MAX_INTF; rc++) { if (usbcan->canch[rc]) netif_device_detach(usbcan->canch[rc]->netdev); @@ -491,8 +490,6 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *net hf->echo_id = idx; hf->channel = dev->channel; - hf->flags = 0; - hf->reserved = 0; cf = (struct can_frame *)skb->data; @@ -572,7 +569,7 @@ static int gs_can_open(struct net_device *netdev) if (rc) return rc; - if (!parent->active_channels) { + if (atomic_add_return(1, &parent->active_channels) == 1) { for (i = 0; i < GS_MAX_RX_URBS; i++) { struct urb *urb; u8 *buf; @@ -675,7 +672,6 @@ static int gs_can_open(struct net_device *netdev) dev->can.state = CAN_STATE_ERROR_ACTIVE; - parent->active_channels++; if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) netif_start_queue(netdev); @@ -691,8 +687,7 @@ static int gs_can_close(struct net_device *netdev) netif_stop_queue(netdev); /* Stop polling */ - parent->active_channels--; - if (!parent->active_channels) + if (atomic_dec_and_test(&parent->active_channels)) usb_kill_anchored_urbs(&parent->rx_submitted); /* Stop sending URBs */ @@ -906,6 +901,8 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id * return -ENOMEM; init_usb_anchor(&dev->rx_submitted); + atomic_set(&dev->active_channels, 0); + usb_set_intfdata(intf, dev); dev->udev = interface_to_usbdev(intf); diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 81abb30d9ec0..9991ee93735a 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -31,10 +31,7 @@ #define USB_SEND_TIMEOUT 
1000 /* msecs */ #define USB_RECV_TIMEOUT 1000 /* msecs */ #define RX_BUFFER_SIZE 3072 -#define KVASER_USB_CAN_CLOCK_8MHZ 8000000 -#define KVASER_USB_CAN_CLOCK_16MHZ 16000000 -#define KVASER_USB_CAN_CLOCK_24MHZ 24000000 -#define KVASER_USB_CAN_CLOCK_32MHZ 32000000 +#define CAN_USB_CLOCK 8000000 #define MAX_NET_DEVICES 3 #define MAX_USBCAN_NET_DEVICES 2 @@ -142,12 +139,6 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id) #define CMD_LEAF_USB_THROTTLE 77 #define CMD_LEAF_LOG_MESSAGE 106 -/* Leaf frequency options */ -#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60 -#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0 -#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5) -#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6) - /* error factors */ #define M16C_EF_ACKE BIT(0) #define M16C_EF_CRCE BIT(1) @@ -478,8 +469,6 @@ struct kvaser_usb { bool rxinitdone; void *rxbuf[MAX_RX_URBS]; dma_addr_t rxbuf_dma[MAX_RX_URBS]; - - struct can_clock clock; }; struct kvaser_usb_net_priv { @@ -657,27 +646,6 @@ static int kvaser_usb_send_simple_msg(const struct kvaser_usb *dev, return rc; } -static void kvaser_usb_get_software_info_leaf(struct kvaser_usb *dev, - const struct leaf_msg_softinfo *softinfo) -{ - u32 sw_options = le32_to_cpu(softinfo->sw_options); - - dev->fw_version = le32_to_cpu(softinfo->fw_version); - dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx); - - switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { - case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: - dev->clock.freq = KVASER_USB_CAN_CLOCK_16MHZ; - break; - case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: - dev->clock.freq = KVASER_USB_CAN_CLOCK_24MHZ; - break; - case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: - dev->clock.freq = KVASER_USB_CAN_CLOCK_32MHZ; - break; - } -} - static int kvaser_usb_get_software_info(struct kvaser_usb *dev) { struct kvaser_msg msg; @@ -693,13 +661,14 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev) switch (dev->family) { case 
KVASER_LEAF: - kvaser_usb_get_software_info_leaf(dev, &msg.u.leaf.softinfo); + dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version); + dev->max_tx_urbs = + le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx); break; case KVASER_USBCAN: dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version); dev->max_tx_urbs = le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx); - dev->clock.freq = KVASER_USB_CAN_CLOCK_8MHZ; break; } @@ -1956,7 +1925,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf, kvaser_usb_reset_tx_urb_contexts(priv); priv->can.state = CAN_STATE_STOPPED; - priv->can.clock.freq = dev->clock.freq; + priv->can.clock.freq = CAN_USB_CLOCK; priv->can.bittiming_const = &kvaser_usb_bittiming_const; priv->can.do_set_bittiming = kvaser_usb_set_bittiming; priv->can.do_set_mode = kvaser_usb_set_mode; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 620db93ab9a3..7b148174eb76 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -882,7 +882,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, if (dev->adapter->dev_set_bus) { err = dev->adapter->dev_set_bus(dev, 0); if (err) - goto adap_dev_free; + goto lbl_unregister_candev; } /* get device number early */ @@ -894,10 +894,6 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, return 0; -adap_dev_free: - if (dev->adapter->dev_free) - dev->adapter->dev_free(dev); - lbl_unregister_candev: unregister_candev(netdev); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 1c3f95a63940..2e62cdc7ec7a 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -559,10 +559,11 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, } else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) { new_state = 
CAN_STATE_ERROR_WARNING; } else { - /* back to (or still in) ERROR_ACTIVE state */ - new_state = CAN_STATE_ERROR_ACTIVE; + /* no error bit (so, no error skb, back to active state) */ + dev->can.state = CAN_STATE_ERROR_ACTIVE; pdev->bec.txerr = 0; pdev->bec.rxerr = 0; + return 0; } /* state hasn't changed */ diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index 11d045699344..50d9b945089e 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -148,8 +148,7 @@ struct usb_8dev_priv { u8 *cmd_msg_buffer; struct mutex usb_8dev_cmd_lock; - void *rxbuf[MAX_RX_URBS]; - dma_addr_t rxbuf_dma[MAX_RX_URBS]; + }; /* tx frame */ @@ -747,7 +746,6 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) for (i = 0; i < MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf; - dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); @@ -758,7 +756,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) } buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL, - &buf_dma); + &urb->transfer_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); @@ -766,8 +764,6 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) break; } - urb->transfer_dma = buf_dma; - usb_fill_bulk_urb(urb, priv->udev, usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_RX), @@ -785,9 +781,6 @@ static int usb_8dev_start(struct usb_8dev_priv *priv) break; } - priv->rxbuf[i] = buf; - priv->rxbuf_dma[i] = buf_dma; - /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } @@ -857,10 +850,6 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv) usb_kill_anchored_urbs(&priv->rx_submitted); - for (i = 0; i < MAX_RX_URBS; ++i) - usb_free_coherent(priv->udev, RX_BUFFER_SIZE, - priv->rxbuf[i], priv->rxbuf_dma[i]); - usb_kill_anchored_urbs(&priv->tx_submitted); atomic_set(&priv->active_tx_urbs, 0); diff --git a/drivers/net/can/xilinx_can.c 
b/drivers/net/can/xilinx_can.c index 19745e88774e..700b98d9c250 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1284,12 +1284,7 @@ static int xcan_probe(struct platform_device *pdev) spin_lock_init(&priv->tx_lock); /* Get IRQ for the device */ - ret = platform_get_irq(pdev, 0); - if (ret < 0) - goto err_free; - - ndev->irq = ret; - + ndev->irq = platform_get_irq(pdev, 0); ndev->flags |= IFF_ECHO; /* We support local echo */ platform_set_drvdata(pdev, ndev); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index a56f4f3a5872..0864f05633a2 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1067,10 +1067,8 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) * in bits 15:8 and the patch level in bits 7:0 which is exactly what * the REG_PHY_REVISION register layout is. */ - if (priv->int_phy_mask & BIT(port)) - return priv->hw_params.gphy_rev; - else - return 0; + + return priv->hw_params.gphy_rev; } static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr, diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 3c3f34fc2928..0e6c8f249125 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -95,7 +95,6 @@ config JME config KORINA tristate "Korina (IDT RC32434) Ethernet support" depends on MIKROTIK_RB532 - select CRC32 ---help--- If you have a Mikrotik RouterBoard 500 or IDT RC32434 based system say Y. Otherwise say N. 
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index e3ca8abb14f4..20bf55dbd76f 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1579,11 +1579,10 @@ static int greth_of_remove(struct platform_device *of_dev) mdiobus_unregister(greth->mdio); unregister_netdev(ndev); + free_netdev(ndev); of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0])); - free_netdev(ndev); - return 0; } diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 10dda58849c6..dde3cd2d4763 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -853,13 +853,13 @@ static int emac_probe(struct platform_device *pdev) db->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(db->clk)) { ret = PTR_ERR(db->clk); - goto out_dispose_mapping; + goto out_iounmap; } ret = clk_prepare_enable(db->clk); if (ret) { dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret); - goto out_dispose_mapping; + goto out_iounmap; } ret = sunxi_sram_claim(&pdev->dev); @@ -916,8 +916,6 @@ out_release_sram: sunxi_sram_release(&pdev->dev); out_clk_disable_unprepare: clk_disable_unprepare(db->clk); -out_dispose_mapping: - irq_dispose_mapping(ndev->irq); out_iounmap: iounmap(db->membase); out: @@ -936,7 +934,6 @@ static int emac_remove(struct platform_device *pdev) unregister_netdev(ndev); sunxi_sram_release(&pdev->dev); clk_disable_unprepare(db->clk); - irq_dispose_mapping(ndev->irq); iounmap(db->membase); free_netdev(ndev); diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index dbd5cbc0a0a9..bb51f124d8c7 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -1383,19 +1383,16 @@ static int altera_tse_probe(struct platform_device *pdev) priv->rxdescmem_busaddr = dma_res->start; } else { - 
ret = -ENODEV; goto err_free_netdev; } - if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) { + if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) dma_set_coherent_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)); - } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) { + else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32)); - } else { - ret = -EIO; + else goto err_free_netdev; - } /* MAC address space */ ret = request_and_map(pdev, "control_port", &control_port, diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index b305903c91c4..7ccebae9cb48 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1493,7 +1493,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) } pci_set_master(pdev); - if (!pci_resource_len(pdev, 0)) { + ioaddr = pci_resource_start(pdev, 0); + if (!ioaddr) { if (pcnet32_debug & NETIF_MSG_PROBE) pr_err("card has no PCI IO resources, aborting\n"); return -ENODEV; @@ -1505,8 +1506,6 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) pr_err("architecture does not support 32bit PCI busmaster DMA\n"); return err; } - - ioaddr = pci_resource_start(pdev, 0); if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { if (pcnet32_debug & NETIF_MSG_PROBE) pr_err("io address range already allocated\n"); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 2050c157eb74..23fc244eb8a4 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -494,9 +494,7 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata) if (!channel->tx_ring) break; - /* Deactivate the Tx timer */ del_timer_sync(&channel->tx_timer); - channel->tx_timer_active = 0; } } @@ -1976,14 +1974,6 @@ read_again: buf2_len = xgbe_rx_buf2_len(rdata, packet, 
len); len += buf2_len; - if (buf2_len > rdata->rx.buf.dma_len) { - /* Hardware inconsistency within the descriptors - * that has resulted in a length underflow. - */ - error = 1; - goto skip_data; - } - if (!skb) { skb = xgbe_create_skb(pdata, napi, rdata, buf1_len); @@ -2013,10 +2003,8 @@ skip_data: if (!last || context_next) goto read_again; - if (!skb || error) { - dev_kfree_skb(skb); + if (!skb) goto next_packet; - } /* Be sure we don't exceed the configured MTU */ max_len = netdev->mtu + ETH_HLEN; diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index 6b5b4d2843d4..52a6b16f57d2 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig @@ -19,7 +19,6 @@ config ARC_EMAC_CORE tristate select MII select PHYLIB - select CRC32 config ARC_EMAC tristate "ARC EMAC support" diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index c76102754c22..94f06c35ad9c 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -90,13 +90,9 @@ static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, struct dma_desc *desc, unsigned int port) { - unsigned long desc_flags; - /* Ports are latched, so write upper address first */ - spin_lock_irqsave(&priv->desc_lock, desc_flags); tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port)); tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port)); - spin_unlock_irqrestore(&priv->desc_lock, desc_flags); } /* Ethtool operations */ @@ -1612,7 +1608,6 @@ static int bcm_sysport_open(struct net_device *dev) } /* Initialize both hardware and software ring */ - spin_lock_init(&priv->desc_lock); for (i = 0; i < dev->num_tx_queues; i++) { ret = bcm_sysport_init_tx_ring(priv, i); if (ret) { diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index bb484c7faf67..e668b1ce5828 100644 --- 
a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -660,7 +660,6 @@ struct bcm_sysport_priv { int wol_irq; /* Transmit rings */ - spinlock_t desc_lock; struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS]; /* Receive queue */ diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 1616647719ba..8fc3f3c137f8 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8234,9 +8234,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) BNX2_WR(bp, PCI_COMMAND, reg); } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) && !(bp->flags & BNX2_FLAG_PCIX)) { + dev_err(&pdev->dev, "5706 A1 can only be used in a PCIX bus, aborting\n"); - rc = -EPERM; goto err_out_unmap; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index dad830ee9d5b..13de5ce3facf 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2690,8 +2690,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) } /* Allocated memory for FW statistics */ - rc = bnx2x_alloc_fw_stats_mem(bp); - if (rc) + if (bnx2x_alloc_fw_stats_mem(bp)) LOAD_ERROR_EXIT(bp, load_error0); /* request pf to initialize status blocks */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h index fc7fce642666..1835d2e451c0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h @@ -635,13 +635,11 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, { int i, rc; struct bnx2x_ilt *ilt = BP_ILT(bp); - struct ilt_client_info *ilt_cli; + struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; if (!ilt || !ilt->lines) return -1; - ilt_cli = &ilt->clients[cli_num]; - if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM)) 
return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 38fae8802663..55a7774e8ef5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1245,10 +1245,8 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, goto failed; /* SR-IOV capability was enabled but there are no VFs*/ - if (iov->total == 0) { - err = 0; + if (iov->total == 0) goto failed; - } iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 7444f17b9e05..250ecbcca019 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4313,10 +4313,9 @@ static void bnxt_tx_disable(struct bnxt *bp) txr->dev_state = BNXT_DEV_STATE_CLOSING; } } - /* Drop carrier first to prevent TX timeout */ - netif_carrier_off(bp->dev); /* Stop all TX queues */ netif_tx_disable(bp->dev); + netif_carrier_off(bp->dev); } static void bnxt_tx_enable(struct bnxt *bp) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index df946dd360e6..bae8df951780 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1094,7 +1094,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, switch (mode) { case GENET_POWER_PASSIVE: reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY | - EXT_PWR_DOWN_BIAS | EXT_ENERGY_DET_MASK); + EXT_PWR_DOWN_BIAS); /* fallthrough */ case GENET_POWER_CABLE_SENSE: /* enable APD */ @@ -2663,21 +2663,15 @@ static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, /* Returns a reusable dma control register value */ static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) { - unsigned int i; u32 reg; u32 dma_ctrl; /* disable DMA */ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; - for 
(i = 0; i < priv->hw_params->tx_queues; i++) - dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); reg = bcmgenet_tdma_readl(priv, DMA_CTRL); reg &= ~dma_ctrl; bcmgenet_tdma_writel(priv, reg, DMA_CTRL); - dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; - for (i = 0; i < priv->hw_params->rx_queues; i++) - dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); reg = bcmgenet_rdma_readl(priv, DMA_CTRL); reg &= ~dma_ctrl; bcmgenet_rdma_writel(priv, reg, DMA_CTRL); @@ -2908,6 +2902,12 @@ static int bcmgenet_open(struct net_device *dev) bcmgenet_set_hw_addr(priv, dev->dev_addr); + if (priv->internal_phy) { + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + reg |= EXT_ENERGY_DET_MASK; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + } + /* Disable RX/TX DMA and flush TX queues */ dma_ctrl = bcmgenet_dma_disable(priv); @@ -3443,12 +3443,10 @@ static int bcmgenet_probe(struct platform_device *pdev) /* Request the WOL interrupt and advertise suspend if available */ priv->wol_irq_disabled = true; - if (priv->wol_irq > 0) { - err = devm_request_irq(&pdev->dev, priv->wol_irq, - bcmgenet_wol_isr, 0, dev->name, priv); - if (!err) - device_set_wakeup_capable(&pdev->dev, 1); - } + err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0, + dev->name, priv); + if (!err) + device_set_wakeup_capable(&pdev->dev, 1); /* Set the needed headroom to account for any possible * features enabling/disabling at runtime @@ -3520,10 +3518,8 @@ static int bcmgenet_probe(struct platform_device *pdev) clk_disable_unprepare(priv->clk); err = register_netdev(dev); - if (err) { - bcmgenet_mii_exit(dev); + if (err) goto err; - } return err; @@ -3597,6 +3593,7 @@ static int bcmgenet_resume(struct device *d) struct bcmgenet_priv *priv = netdev_priv(dev); unsigned long dma_ctrl; int ret; + u32 reg; if (!netif_running(dev)) return 0; @@ -3631,6 +3628,12 @@ static int bcmgenet_resume(struct device *d) bcmgenet_set_hw_addr(priv, dev->dev_addr); + if (priv->internal_phy) { + reg = 
bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + reg |= EXT_ENERGY_DET_MASK; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + } + if (priv->wolopts) bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index df107ed67220..b97122926d3a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -167,6 +167,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, reg |= CMD_RX_EN; bcmgenet_umac_writel(priv, reg, UMAC_CMD); + if (priv->hw_params->flags & GENET_HAS_EXT) { + reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + reg &= ~EXT_ENERGY_DET_MASK; + bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); + } + /* Enable the MPD interrupt */ cpu_mask_clear = UMAC_IRQ_MPD_R; diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index d1cdb8540e12..78803e7de360 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -1955,9 +1955,6 @@ static struct net_device_stats *gem_get_stats(struct macb *bp) struct gem_stats *hwstat = &bp->hw_stats.gem; struct net_device_stats *nstat = &bp->stats; - if (!netif_running(bp->dev)) - return nstat; - gem_update_stats(bp); nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h index 3ab84d18ad3a..5e3aff242ad3 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h @@ -417,7 +417,7 @@ | CN6XXX_INTR_M0UNWI_ERR \ | CN6XXX_INTR_M1UPB0_ERR \ | CN6XXX_INTR_M1UPWI_ERR \ - | CN6XXX_INTR_M1UNB0_ERR \ + | CN6XXX_INTR_M1UPB0_ERR \ | CN6XXX_INTR_M1UNWI_ERR \ | CN6XXX_INTR_INSTR_DB_OF_ERR \ | CN6XXX_INTR_SLIST_DB_OF_ERR \ diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c 
b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 5da49e8b533b..912ee28ab58b 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -528,7 +528,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; mbx.rq.qs_num = qs->vnic_id; mbx.rq.rq_num = qidx; - mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) | + mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 79eb2257a30e..f5f1b0b51ebd 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -1133,7 +1133,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!adapter->registered_device_map) { pr_err("%s: could not register any net devices\n", pci_name(pdev)); - err = -EINVAL; goto out_release_adapter_res; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index f012649891da..e4b5b057f417 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -3111,7 +3111,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, GFP_KERNEL | __GFP_COMP); if (!avail) { CH_ALERT(adapter, "free list queue 0 initialization failed\n"); - ret = -ENOMEM; goto err; } if (avail < q->fl[0].size) diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index 666b982a7bd7..a22768c94200 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -3643,8 +3643,6 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 
10); adapter->params.pci.vpd_cap_addr = pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD); - if (!adapter->params.pci.vpd_cap_addr) - return -ENODEV; ret = get_vpd_params(adapter, &adapter->params.vpd); if (ret < 0) return ret; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index ca3def824d4b..54d5e53e94af 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2263,7 +2263,7 @@ do { \ seq_printf(seq, "%-12s", s); \ for (i = 0; i < n; ++i) \ seq_printf(seq, " %16" fmt_spec, v); \ - seq_putc(seq, '\n'); \ + seq_putc(seq, '\n'); \ } while (0) #define S(s, v) S3("s", s, v) #define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v) diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index b8d82f32b2db..cf94b72dbacd 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -144,8 +144,6 @@ struct board_info { u32 wake_state; int ip_summed; - - struct regulator *power_supply; }; /* debug code */ @@ -1464,7 +1462,7 @@ dm9000_probe(struct platform_device *pdev) if (ret) { dev_err(dev, "failed to request reset gpio %d: %d\n", reset_gpios, ret); - goto out_regulator_disable; + return -ENODEV; } /* According to manual PWRST# Low Period Min 1ms */ @@ -1476,18 +1474,14 @@ dm9000_probe(struct platform_device *pdev) if (!pdata) { pdata = dm9000_parse_dt(&pdev->dev); - if (IS_ERR(pdata)) { - ret = PTR_ERR(pdata); - goto out_regulator_disable; - } + if (IS_ERR(pdata)) + return PTR_ERR(pdata); } /* Init network device */ ndev = alloc_etherdev(sizeof(struct board_info)); - if (!ndev) { - ret = -ENOMEM; - goto out_regulator_disable; - } + if (!ndev) + return -ENOMEM; SET_NETDEV_DEV(ndev, &pdev->dev); @@ -1498,8 +1492,6 @@ dm9000_probe(struct platform_device *pdev) db->dev = &pdev->dev; db->ndev = ndev; - if (!IS_ERR(power)) - db->power_supply = power; 
spin_lock_init(&db->lock); mutex_init(&db->addr_lock); @@ -1718,10 +1710,6 @@ out: dm9000_release_board(pdev, db); free_netdev(ndev); -out_regulator_disable: - if (!IS_ERR(power)) - regulator_disable(power); - return ret; } @@ -1781,13 +1769,10 @@ static int dm9000_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); - struct board_info *dm = to_dm9000_board(ndev); unregister_netdev(ndev); - dm9000_release_board(pdev, dm); + dm9000_release_board(pdev, netdev_priv(ndev)); free_netdev(ndev); /* free device structure */ - if (dm->power_supply) - regulator_disable(dm->power_supply); dev_dbg(&pdev->dev, "released and freed device\n"); return 0; diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index ffc9c7947b93..7799cf33cc6e 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -4701,10 +4701,6 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p) lp->ibn = 3; lp->active = *p++; if (MOTO_SROM_BUG) lp->active = 0; - /* if (MOTO_SROM_BUG) statement indicates lp->active could - * be 8 (i.e. the size of array lp->phy) */ - if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy))) - return -EINVAL; lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1); lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1); lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; @@ -4996,23 +4992,19 @@ mii_get_phy(struct net_device *dev) } if ((j == limit) && (i < DE4X5_MAX_MII)) { for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); - if (k < DE4X5_MAX_PHY) { - lp->phy[k].addr = i; - lp->phy[k].id = id; - lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ - lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ - lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ - lp->mii_cnt++; - lp->active++; - printk("%s: Using generic MII device control. 
If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); - j = de4x5_debug; - de4x5_debug |= DEBUG_MII; - de4x5_dbg_mii(dev, k); - de4x5_debug = j; - printk("\n"); - } else { - goto purgatory; - } + lp->phy[k].addr = i; + lp->phy[k].id = id; + lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ + lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ + lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ + lp->mii_cnt++; + lp->active++; + printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); + j = de4x5_debug; + de4x5_debug |= DEBUG_MII; + de4x5_dbg_mii(dev, k); + de4x5_debug = j; + printk("\n"); } } purgatory: diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index abc66eb13c35..3c0e4d5c5fef 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -368,7 +368,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) int i, option = find_cnt < MAX_UNITS ? 
options[find_cnt] : 0; void __iomem *ioaddr; - i = pcim_enable_device(pdev); + i = pci_enable_device(pdev); if (i) return i; pci_set_master(pdev); @@ -390,7 +390,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size); if (!ioaddr) - goto err_out_netdev; + goto err_out_free_res; for (i = 0; i < 3; i++) ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i)); @@ -469,6 +469,8 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) err_out_cleardev: pci_iounmap(pdev, ioaddr); +err_out_free_res: + pci_release_regions(pdev); err_out_netdev: free_netdev (dev); return -ENODEV; @@ -1535,6 +1537,7 @@ static void w840_remove1(struct pci_dev *pdev) if (dev) { struct netdev_private *np = netdev_priv(dev); unregister_netdev(dev); + pci_release_regions(pdev); pci_iounmap(pdev, np->base_addr); free_netdev(dev); } diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index 0ade0c6d81ee..f7b42483921c 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -589,12 +589,10 @@ static void ec_bhf_remove(struct pci_dev *dev) struct ec_bhf_priv *priv = netdev_priv(net_dev); unregister_netdev(net_dev); + free_netdev(net_dev); pci_iounmap(dev, priv->dma_io); pci_iounmap(dev, priv->io); - - free_netdev(net_dev); - pci_release_regions(dev); pci_clear_master(dev); pci_disable_device(dev); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 398b9bd09400..7cd39324106d 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -5878,7 +5878,6 @@ drv_cleanup: unmap_bars: be_unmap_pci_bars(adapter); free_netdev: - pci_disable_pcie_error_reporting(pdev); free_netdev(netdev); rel_reg: pci_release_regions(pdev); diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 
2cb20d8e6bdf..b1026689b78f 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -586,7 +586,7 @@ static s32 nps_enet_probe(struct platform_device *pdev) /* Get IRQ number */ priv->irq = platform_get_irq(pdev, 0); - if (priv->irq < 0) { + if (!priv->irq) { dev_err(dev, "failed to retrieve value from device tree\n"); err = -ENODEV; goto out_netdev; @@ -621,8 +621,8 @@ static s32 nps_enet_remove(struct platform_device *pdev) struct nps_enet_priv *priv = netdev_priv(ndev); unregister_netdev(ndev); - netif_napi_del(&priv->napi); free_netdev(ndev); + netif_napi_del(&priv->napi); return 0; } diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 7bbf85e98974..99d33e2d35e6 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -360,9 +360,6 @@ struct bufdesc_ex { #define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */ #define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2) #define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2) -#define FEC_ENET_RXF_GET(X) (((X) == 0) ? FEC_ENET_RXF_0 : \ - (((X) == 1) ? FEC_ENET_RXF_1 : \ - FEC_ENET_RXF_2)) #define FEC_ENET_TS_AVAIL ((uint)0x00010000) #define FEC_ENET_TS_TIMER ((uint)0x00008000) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 61ab471de4bb..9c608211fcfd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1407,7 +1407,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) if ((status & BD_ENET_RX_LAST) == 0) netdev_err(ndev, "rcv is not +last\n"); - writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT); + writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); /* Check for errors. 
*/ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 031d4b3a544c..f9e74461bdc0 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -396,16 +396,9 @@ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) u64 ns; unsigned long flags; - mutex_lock(&adapter->ptp_clk_mutex); - /* Check the ptp clock */ - if (!adapter->ptp_clk_on) { - mutex_unlock(&adapter->ptp_clk_mutex); - return -EINVAL; - } spin_lock_irqsave(&adapter->tmreg_lock, flags); ns = timecounter_read(&adapter->tc); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - mutex_unlock(&adapter->ptp_clk_mutex); *ts = ns_to_timespec64(ns); @@ -586,10 +579,6 @@ void fec_ptp_init(struct platform_device *pdev) fep->ptp_caps.enable = fec_ptp_enable; fep->cycle_speed = clk_get_rate(fep->clk_ptp); - if (!fep->cycle_speed) { - fep->cycle_speed = NSEC_PER_SEC; - dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n"); - } fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed; spin_lock_init(&fep->tmreg_lock); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index cfae74d8e659..68a428de0bc0 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -231,4 +231,3 @@ static struct platform_driver fs_enet_bb_mdio_driver = { }; module_platform_driver(fs_enet_bb_mdio_driver); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 3b6232a6a56d..2be383e6d258 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -232,4 +232,3 @@ static struct platform_driver fs_enet_fec_mdio_driver = { }; module_platform_driver(fs_enet_fec_mdio_driver); 
-MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index fb135797688a..bc00fa5e864f 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -485,11 +485,7 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev) static int gfar_set_mac_addr(struct net_device *dev, void *p) { - int ret; - - ret = eth_mac_addr(dev, p); - if (ret) - return ret; + eth_mac_addr(dev, p); gfar_set_mac_for_addr(dev, 0, dev->dev_addr); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index faf8dfb49fb5..96a1f62cc148 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3934,10 +3934,10 @@ static int ucc_geth_remove(struct platform_device* ofdev) struct ucc_geth_private *ugeth = netdev_priv(dev); unregister_netdev(dev); + free_netdev(dev); ucc_geth_memclean(ugeth); of_node_put(ugeth->ug_info->tbi_node); of_node_put(ugeth->ug_info->phy_node); - free_netdev(dev); return 0; } diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h index 1a40a5f11081..75f337163ce3 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.h +++ b/drivers/net/ethernet/freescale/ucc_geth.h @@ -580,14 +580,7 @@ struct ucc_geth_tx_global_pram { u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */ u32 tqptr; /* a base pointer to the Tx Queues Memory Region */ - u8 res2[0x78 - 0x74]; - u64 snums_en; - u32 l2l3baseptr; /* top byte consists of a few other bit fields */ - - u16 mtu[8]; - u8 res3[0xa8 - 0x94]; - u32 wrrtablebase; /* top byte is reserved */ - u8 res4[0xc0 - 0xac]; + u8 res2[0x80 - 0x74]; } __packed; /* structure representing Extended Filtering Global Parameters in PRAM */ diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index 171bfaa536b7..a15b4a97c172 100644 --- 
a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -304,10 +304,9 @@ err_ioremap: static int xgmac_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); - struct mdio_fsl_priv *priv = bus->priv; mdiobus_unregister(bus); - iounmap(priv->mdio_base); + iounmap(bus->priv); mdiobus_free(bus); return 0; diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c index fb1a9bd24660..a7139f588ad2 100644 --- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c @@ -548,11 +548,6 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id) return -1; base = ioremap(link->resource[2]->start, resource_size(link->resource[2])); - if (!base) { - pcmcia_release_window(link, link->resource[2]); - return -1; - } - pcmcia_map_mem_page(link, link->resource[2], 0); /* diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 82f179b33a80..a2f2db58b5ab 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -433,10 +433,6 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data, /* for mutl buffer*/ new_skb = skb_copy(skb, GFP_ATOMIC); dev_kfree_skb_any(skb); - if (!new_skb) { - netdev_err(ndev, "skb alloc failed\n"); - return; - } skb = new_skb; check_ok = 0; diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index 1500027ae18b..7ce6379fd1a3 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ b/drivers/net/ethernet/i825xx/82596.c @@ -1155,7 +1155,7 @@ struct net_device * __init i82596_probe(int unit) err = -ENODEV; goto out; } - memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */ + memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! 
Get addr from NOVRAM */ dev->base_addr = MVME_I596_BASE; dev->irq = (unsigned) MVME16x_IRQ_I596; goto found; diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c index e4128e151b85..2af7f77345fb 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -122,10 +122,9 @@ static int sni_82596_probe(struct platform_device *dev) netdevice->dev_addr[5] = readb(eth_addr + 0x06); iounmap(eth_addr); - if (netdevice->irq < 0) { + if (!netdevice->irq) { printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", __FILE__, netdevice->base_addr); - retval = netdevice->irq; goto probe_failed; } diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 43fc6d370457..efe84ca20da7 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -2654,8 +2654,10 @@ static int ehea_restart_qps(struct net_device *dev) u16 dummy16 = 0; cb0 = (void *)get_zeroed_page(GFP_KERNEL); - if (!cb0) - return -ENOMEM; + if (!cb0) { + ret = -ENOMEM; + goto out; + } for (i = 0; i < (port->num_def_qps); i++) { struct ehea_port_res *pr = &port->port_res[i]; @@ -2675,7 +2677,6 @@ static int ehea_restart_qps(struct net_device *dev) cb0); if (hret != H_SUCCESS) { netdev_err(dev, "query_ehea_qp failed (1)\n"); - ret = -EFAULT; goto out; } @@ -2688,7 +2689,6 @@ static int ehea_restart_qps(struct net_device *dev) &dummy64, &dummy16, &dummy16); if (hret != H_SUCCESS) { netdev_err(dev, "modify_ehea_qp failed (1)\n"); - ret = -EFAULT; goto out; } @@ -2697,7 +2697,6 @@ static int ehea_restart_qps(struct net_device *dev) cb0); if (hret != H_SUCCESS) { netdev_err(dev, "query_ehea_qp failed (2)\n"); - ret = -EFAULT; goto out; } diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index aa556e4f9051..93c29094ceff 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1423,7 
+1423,7 @@ static int e100_phy_check_without_mii(struct nic *nic) u8 phy_type; int without_mii; - phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; + phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f; switch (phy_type) { case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ @@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic) mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && - (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) { + (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { /* enable/disable MDI/MDI-X auto-switching. */ mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH); @@ -2298,9 +2298,9 @@ static int e100_asf(struct nic *nic) { /* ASF can be enabled from eeprom */ return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && - (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && - !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && - ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); + (nic->eeprom[eeprom_config_asf] & eeprom_asf) && + !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) && + ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE); } static int e100_up(struct nic *nic) @@ -2462,15 +2462,11 @@ static void e100_get_drvinfo(struct net_device *netdev, sizeof(info->bus_info)); } -#define E100_PHY_REGS 0x1D +#define E100_PHY_REGS 0x1C static int e100_get_regs_len(struct net_device *netdev) { struct nic *nic = netdev_priv(netdev); - - /* We know the number of registers, and the size of the dump buffer. - * Calculate the total size in bytes. 
- */ - return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); + return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf); } static void e100_get_regs(struct net_device *netdev, @@ -2484,18 +2480,14 @@ static void e100_get_regs(struct net_device *netdev, buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | ioread8(&nic->csr->scb.cmd_lo) << 16 | ioread16(&nic->csr->scb.status); - for (i = 0; i < E100_PHY_REGS; i++) - /* Note that we read the registers in reverse order. This - * ordering is the ABI apparently used by ethtool and other - * applications. - */ - buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, - E100_PHY_REGS - 1 - i); + for (i = E100_PHY_REGS; i >= 0; i--) + buff[1 + E100_PHY_REGS - i] = + mdio_read(netdev, nic->mii.phy_id, i); memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); e100_exec_cb(nic, NULL, e100_dump); msleep(10); - memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, - sizeof(nic->mem->dump_buf)); + memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf, + sizeof(nic->mem->dump_buf)); } static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -2960,7 +2952,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Wol magic packet can be enabled from eeprom */ if ((nic->mac >= mac_82558_D101_A4) && - (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { + (nic->eeprom[eeprom_id] & eeprom_id_wol)) { nic->flags |= wol_magic; device_set_wakeup_enable(&pdev->dev, true); } diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index e486f351a54a..5f7016442ec4 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -917,8 +917,6 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) } else { data &= ~IGP02E1000_PM_D0_LPLU; ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); - if (ret_val) - return ret_val; /* LPLU and SmartSpeed are mutually exclusive. 
LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index cd7403d09c3d..485b9cc53f8b 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1010,8 +1010,6 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) { u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; - u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */ - u16 lat_enc_d = 0; /* latency decoded */ u16 lat_enc = 0; /* latency encoded */ if (link) { @@ -1065,17 +1063,7 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); max_ltr_enc = max_t(u16, max_snoop, max_nosnoop); - lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) * - (1U << (E1000_LTRV_SCALE_FACTOR * - ((lat_enc & E1000_LTRV_SCALE_MASK) - >> E1000_LTRV_SCALE_SHIFT))); - - max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) * - (1U << (E1000_LTRV_SCALE_FACTOR * - ((max_ltr_enc & E1000_LTRV_SCALE_MASK) - >> E1000_LTRV_SCALE_SHIFT))); - - if (lat_enc_d > max_ltr_enc_d) + if (lat_enc > max_ltr_enc) lat_enc = max_ltr_enc; } diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 3a16c457c8dd..34c551e322eb 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -284,11 +284,8 @@ /* Latency Tolerance Reporting */ #define E1000_LTRV 0x000F8 -#define E1000_LTRV_VALUE_MASK 0x000003FF #define E1000_LTRV_SCALE_MAX 5 #define E1000_LTRV_SCALE_FACTOR 5 -#define E1000_LTRV_SCALE_SHIFT 10 -#define E1000_LTRV_SCALE_MASK 0x00001C00 #define E1000_LTRV_REQ_SHIFT 15 #define E1000_LTRV_NOSNOOP_SHIFT 16 #define E1000_LTRV_SEND (1 << 30) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c 
b/drivers/net/ethernet/intel/e1000e/netdev.c index a8ee20ecb3ad..3bd0bdbdfa0e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5875,19 +5875,15 @@ static void e1000_reset_task(struct work_struct *work) struct e1000_adapter *adapter; adapter = container_of(work, struct e1000_adapter, reset_task); - rtnl_lock(); /* don't run the task if already down */ - if (test_bit(__E1000_DOWN, &adapter->state)) { - rtnl_unlock(); + if (test_bit(__E1000_DOWN, &adapter->state)) return; - } if (!(adapter->flags & FLAG_RESTART_NOW)) { e1000e_dump(adapter); e_err("Reset adapter unexpectedly\n"); } e1000e_reinit_locked(adapter); - rtnl_unlock(); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e58db67495d7..756c4ea17655 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5360,27 +5360,6 @@ int i40e_open(struct net_device *netdev) return 0; } -/** - * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues - * @vsi: vsi structure - * - * This updates netdev's number of tx/rx queues - * - * Returns status of setting tx/rx queues - **/ -static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi) -{ - int ret; - - ret = netif_set_real_num_rx_queues(vsi->netdev, - vsi->num_queue_pairs); - if (ret) - return ret; - - return netif_set_real_num_tx_queues(vsi->netdev, - vsi->num_queue_pairs); -} - /** * i40e_vsi_open - * @vsi: the VSI to open @@ -5415,7 +5394,13 @@ int i40e_vsi_open(struct i40e_vsi *vsi) goto err_setup_rx; /* Notify the stack of the actual queue counts. 
*/ - err = i40e_netif_set_realnum_tx_rx_queues(vsi); + err = netif_set_real_num_tx_queues(vsi->netdev, + vsi->num_queue_pairs); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(vsi->netdev, + vsi->num_queue_pairs); if (err) goto err_set_queues; @@ -5424,8 +5409,6 @@ int i40e_vsi_open(struct i40e_vsi *vsi) dev_driver_string(&pf->pdev->dev), dev_name(&pf->pdev->dev)); err = i40e_vsi_request_irq(vsi, int_name); - if (err) - goto err_setup_rx; } else { err = -EINVAL; @@ -6589,7 +6572,7 @@ static int i40e_get_capabilities(struct i40e_pf *pf) if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { /* retry with a larger buffer */ buf_len = data_size; - } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) { + } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { dev_info(&pf->pdev->dev, "capability discovery failed, err %s aq_err %s\n", i40e_stat_str(&pf->hw, err), @@ -8165,7 +8148,6 @@ static int i40e_sw_init(struct i40e_pf *pf) { int err = 0; int size; - u16 pow; pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); @@ -8200,11 +8182,6 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); - - /* find the next higher power-of-2 of num cpus */ - pow = roundup_pow_of_two(num_online_cpus()); - pf->rss_size_max = min_t(int, pf->rss_size_max, pow); - if (pf->hw.func_caps.rss) { pf->flags |= I40E_FLAG_RSS_ENABLED; pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); @@ -9428,9 +9405,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, case I40E_VSI_VMDQ2: case I40E_VSI_FCOE: ret = i40e_config_netdev(vsi); - if (ret) - goto err_netdev; - ret = i40e_netif_set_realnum_tx_rx_queues(vsi); if (ret) goto err_netdev; ret = register_netdev(vsi->netdev); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c 
b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 8fdbc24b3cba..5f03ab3dfa19 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2503,7 +2503,6 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_disable_pcie_error_reporting(pdev); pci_release_regions(pdev); err_pci_reg: err_dma: diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 6ccbf21547d0..a4aa4d10ca70 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -945,7 +945,6 @@ static void igb_configure_msix(struct igb_adapter *adapter) **/ static int igb_request_msix(struct igb_adapter *adapter) { - unsigned int num_q_vectors = adapter->num_q_vectors; struct net_device *netdev = adapter->netdev; int i, err = 0, vector = 0, free_vector = 0; @@ -954,13 +953,7 @@ static int igb_request_msix(struct igb_adapter *adapter) if (err) goto err_out; - if (num_q_vectors > MAX_Q_VECTORS) { - num_q_vectors = MAX_Q_VECTORS; - dev_warn(&adapter->pdev->dev, - "The number of queue vectors (%d) is higher than max allowed (%d)\n", - adapter->num_q_vectors, MAX_Q_VECTORS); - } - for (i = 0; i < num_q_vectors; i++) { + for (i = 0; i < adapter->num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; vector++; @@ -5428,6 +5421,8 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter) event.type = PTP_CLOCK_PPS; if (adapter->ptp_caps.pps) ptp_clock_event(adapter->ptp_clock, &event); + else + dev_err(&adapter->pdev->dev, "unexpected SYS WRAP"); ack |= TSINTR_SYS_WRAP; } diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index ab080118201d..519b72c41888 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2793,7 +2793,6 @@ static int 
igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_hw_init: - netif_napi_del(&adapter->rx_ring->napi); kfree(adapter->tx_ring); kfree(adapter->rx_ring); err_sw_init: diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 88f5c45d9eef..b491de946a0e 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -216,7 +216,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb_any(skb); spin_unlock_irqrestore(&lp->lock, flags); - return NETDEV_TX_OK; + return NETDEV_TX_BUSY; } } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index d6b25aba4004..575da945f151 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -90,7 +90,7 @@ #define MVNETA_DESC_SWAP BIT(6) #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) #define MVNETA_PORT_STATUS 0x2444 -#define MVNETA_TX_IN_PRGRS BIT(0) +#define MVNETA_TX_IN_PRGRS BIT(1) #define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_RX_MIN_FRAME_SIZE 0x247c #define MVNETA_SERDES_CFG 0x24A0 diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 9986f88618bd..7ace07dad6a3 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1577,8 +1577,8 @@ static int pxa168_eth_remove(struct platform_device *pdev) mdiobus_unregister(pep->smi_bus); mdiobus_free(pep->smi_bus); - cancel_work_sync(&pep->tx_timeout_task); unregister_netdev(dev); + cancel_work_sync(&pep->tx_timeout_task); free_netdev(dev); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index b3232a9ec311..b52491f09a4d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -47,7 +47,7 @@ #define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff) #define 
EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff) -int mlx4_en_moderation_update(struct mlx4_en_priv *priv) +static int mlx4_en_moderation_update(struct mlx4_en_priv *priv) { int i; int err = 0; @@ -1907,6 +1907,8 @@ static int mlx4_en_set_tunable(struct net_device *dev, return ret; } +#define MLX4_EEPROM_PAGE_LEN 256 + static int mlx4_en_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) { @@ -1941,7 +1943,7 @@ static int mlx4_en_get_module_info(struct net_device *dev, break; case MLX4_MODULE_ID_SFP: modinfo->type = ETH_MODULE_SFF_8472; - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN; break; default: return -ENOSYS; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index b9014ec73b67..7d61a5de9d5a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -311,9 +311,6 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, int nhoff = skb_network_offset(skb); int ret = 0; - if (skb->encapsulation) - return -EPROTONOSUPPORT; - if (skb->protocol != htons(ETH_P_IP)) return -EPROTONOSUPPORT; @@ -1316,10 +1313,8 @@ static void mlx4_en_tx_timeout(struct net_device *dev) } priv->port_stats.tx_timeout++; - if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) { - en_dbg(DRV, priv, "Scheduling port restart\n"); - queue_work(mdev->workqueue, &priv->restart_task); - } + en_dbg(DRV, priv, "Scheduling watchdog\n"); + queue_work(mdev->workqueue, &priv->watchdog_task); } @@ -1735,7 +1730,6 @@ int mlx4_en_start_port(struct net_device *dev) local_bh_enable(); } - clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state); netif_tx_start_all_queues(dev); netif_device_attach(dev); @@ -1897,7 +1891,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) static void mlx4_en_restart(struct work_struct *work) { struct mlx4_en_priv *priv = container_of(work, struct 
mlx4_en_priv, - restart_task); + watchdog_task); struct mlx4_en_dev *mdev = priv->mdev; struct net_device *dev = priv->dev; @@ -2127,7 +2121,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) if (netif_running(dev)) { mutex_lock(&mdev->state_lock); if (!mdev->device_up) { - /* NIC is probably restarting - let restart task reset + /* NIC is probably restarting - let watchdog task reset * the port */ en_dbg(DRV, priv, "Change MTU called with card down!?\n"); } else { @@ -2136,9 +2130,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) if (err) { en_err(priv, "Failed restarting port:%d\n", priv->port); - if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, - &priv->state)) - queue_work(mdev->workqueue, &priv->restart_task); + queue_work(mdev->workqueue, &priv->watchdog_task); } } mutex_unlock(&mdev->state_lock); @@ -2858,7 +2850,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); spin_lock_init(&priv->stats_lock); INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); - INIT_WORK(&priv->restart_task, mlx4_en_restart); + INIT_WORK(&priv->watchdog_task, mlx4_en_restart); INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); @@ -3191,8 +3183,6 @@ int mlx4_en_reset_config(struct net_device *dev, en_err(priv, "Failed starting port\n"); } - if (!err) - err = mlx4_en_moderation_update(priv); out: mutex_unlock(&mdev->state_lock); netdev_features_change(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 913e0fd10fde..b774ba64bd4b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3222,7 +3222,6 @@ slave_start: if (!SRIOV_VALID_STATE(dev->flags)) { mlx4_err(dev, "Invalid SRIOV state\n"); - err = -EINVAL; goto err_close; } } diff --git 
a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index b6cd733791a8..607daaffae98 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -495,10 +495,6 @@ struct mlx4_en_stats_bitmap { struct mutex mutex; /* for mutual access to stats bitmap */ }; -enum { - MLX4_EN_STATE_FLAG_RESTARTING, -}; - struct mlx4_en_priv { struct mlx4_en_dev *mdev; struct mlx4_en_port_profile *prof; @@ -564,7 +560,7 @@ struct mlx4_en_priv { struct mlx4_en_cq *rx_cq[MAX_RX_RINGS]; struct mlx4_qp drop_qp; struct work_struct rx_mode_task; - struct work_struct restart_task; + struct work_struct watchdog_task; struct work_struct linkstate_task; struct delayed_work stats_task; struct delayed_work service_task; @@ -609,7 +605,6 @@ struct mlx4_en_priv { u32 pflags; u8 rss_key[MLX4_EN_RSS_KEY_SIZE]; u8 rss_hash_fn; - unsigned long state; }; enum mlx4_en_wol { @@ -839,7 +834,6 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); #define DEV_FEATURE_CHANGED(dev, new_features, feature) \ ((dev->features & feature) ^ (new_features & feature)) -int mlx4_en_moderation_update(struct mlx4_en_priv *priv); int mlx4_en_reset_config(struct net_device *dev, struct hwtstamp_config ts_config, netdev_features_t new_features); diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 519dafeb5b0b..d764081ef675 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -1283,7 +1283,6 @@ EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); #define I2C_ADDR_LOW 0x50 #define I2C_ADDR_HIGH 0x51 #define I2C_PAGE_SIZE 256 -#define I2C_HIGH_PAGE_SIZE 128 /* Module Info Data */ struct mlx4_cable_info { @@ -1337,88 +1336,6 @@ static inline const char *cable_info_mad_err_str(u16 mad_status) return "Unknown Error"; } -static int mlx4_get_module_id(struct mlx4_dev *dev, u8 port, u8 *module_id) -{ - struct mlx4_cmd_mailbox *inbox, 
*outbox; - struct mlx4_mad_ifc *inmad, *outmad; - struct mlx4_cable_info *cable_info; - int ret; - - inbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(inbox)) - return PTR_ERR(inbox); - - outbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(outbox)) { - mlx4_free_cmd_mailbox(dev, inbox); - return PTR_ERR(outbox); - } - - inmad = (struct mlx4_mad_ifc *)(inbox->buf); - outmad = (struct mlx4_mad_ifc *)(outbox->buf); - - inmad->method = 0x1; /* Get */ - inmad->class_version = 0x1; - inmad->mgmt_class = 0x1; - inmad->base_version = 0x1; - inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */ - - cable_info = (struct mlx4_cable_info *)inmad->data; - cable_info->dev_mem_address = 0; - cable_info->page_num = 0; - cable_info->i2c_addr = I2C_ADDR_LOW; - cable_info->size = cpu_to_be16(1); - - ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3, - MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, - MLX4_CMD_NATIVE); - if (ret) - goto out; - - if (be16_to_cpu(outmad->status)) { - /* Mad returned with bad status */ - ret = be16_to_cpu(outmad->status); - mlx4_warn(dev, - "MLX4_CMD_MAD_IFC Get Module ID attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n", - 0xFF60, port, I2C_ADDR_LOW, 0, 1, ret, - cable_info_mad_err_str(ret)); - ret = -ret; - goto out; - } - cable_info = (struct mlx4_cable_info *)outmad->data; - *module_id = cable_info->data[0]; -out: - mlx4_free_cmd_mailbox(dev, inbox); - mlx4_free_cmd_mailbox(dev, outbox); - return ret; -} - -static void mlx4_sfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset) -{ - *i2c_addr = I2C_ADDR_LOW; - *page_num = 0; - - if (*offset < I2C_PAGE_SIZE) - return; - - *i2c_addr = I2C_ADDR_HIGH; - *offset -= I2C_PAGE_SIZE; -} - -static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset) -{ - /* Offsets 0-255 belong to page 0. - * Offsets 256-639 belong to pages 01, 02, 03. 
- * For example, offset 400 is page 02: 1 + (400 - 256) / 128 = 2 - */ - if (*offset < I2C_PAGE_SIZE) - *page_num = 0; - else - *page_num = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE; - *i2c_addr = I2C_ADDR_LOW; - *offset -= *page_num * I2C_HIGH_PAGE_SIZE; -} - /** * mlx4_get_module_info - Read cable module eeprom data * @dev: mlx4_dev. @@ -1438,30 +1355,12 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, struct mlx4_cmd_mailbox *inbox, *outbox; struct mlx4_mad_ifc *inmad, *outmad; struct mlx4_cable_info *cable_info; - u8 module_id, i2c_addr, page_num; + u16 i2c_addr; int ret; if (size > MODULE_INFO_MAX_READ) size = MODULE_INFO_MAX_READ; - ret = mlx4_get_module_id(dev, port, &module_id); - if (ret) - return ret; - - switch (module_id) { - case MLX4_MODULE_ID_SFP: - mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset); - break; - case MLX4_MODULE_ID_QSFP: - case MLX4_MODULE_ID_QSFP_PLUS: - case MLX4_MODULE_ID_QSFP28: - mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset); - break; - default: - mlx4_err(dev, "Module ID not recognized: %#x\n", module_id); - return -EINVAL; - } - inbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(inbox)) return PTR_ERR(inbox); @@ -1487,9 +1386,11 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, */ size -= offset + size - I2C_PAGE_SIZE; + i2c_addr = I2C_ADDR_LOW; + cable_info = (struct mlx4_cable_info *)inmad->data; cable_info->dev_mem_address = cpu_to_be16(offset); - cable_info->page_num = page_num; + cable_info->page_num = 0; cable_info->i2c_addr = i2c_addr; cable_info->size = cpu_to_be16(size); diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index d691c33dffc6..cb0102dd7f70 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -1150,10 +1150,6 @@ static int ks8842_probe(struct platform_device *pdev) unsigned i; iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!iomem) { - dev_err(&pdev->dev, "Invalid 
resource\n"); - return -EINVAL; - } if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME)) goto err_mem_region; diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c index b5de665ce718..f3bb9055a292 100644 --- a/drivers/net/ethernet/microchip/encx24j600-regmap.c +++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c @@ -500,19 +500,13 @@ static struct regmap_bus phymap_encx24j600 = { .reg_read = regmap_encx24j600_phy_reg_read, }; -int devm_regmap_init_encx24j600(struct device *dev, - struct encx24j600_context *ctx) +void devm_regmap_init_encx24j600(struct device *dev, + struct encx24j600_context *ctx) { mutex_init(&ctx->mutex); regcfg.lock_arg = ctx; ctx->regmap = devm_regmap_init(dev, ®map_encx24j600, ctx, ®cfg); - if (IS_ERR(ctx->regmap)) - return PTR_ERR(ctx->regmap); ctx->phymap = devm_regmap_init(dev, &phymap_encx24j600, ctx, &phycfg); - if (IS_ERR(ctx->phymap)) - return PTR_ERR(ctx->phymap); - - return 0; } EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600); diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index eea9218a169c..2056b719c262 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -1026,13 +1026,10 @@ static int encx24j600_spi_probe(struct spi_device *spi) priv->speed = SPEED_100; priv->ctx.spi = spi; + devm_regmap_init_encx24j600(&spi->dev, &priv->ctx); ndev->irq = spi->irq; ndev->netdev_ops = &encx24j600_netdev_ops; - ret = devm_regmap_init_encx24j600(&spi->dev, &priv->ctx); - if (ret) - goto out_free; - mutex_init(&priv->lock); /* Reset device and check if it is connected */ diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h index c9b17ccf749c..4be73d5553f8 100644 --- a/drivers/net/ethernet/microchip/encx24j600_hw.h +++ b/drivers/net/ethernet/microchip/encx24j600_hw.h @@ -14,8 +14,8 @@ struct 
encx24j600_context { int bank; }; -int devm_regmap_init_encx24j600(struct device *dev, - struct encx24j600_context *ctx); +void devm_regmap_init_encx24j600(struct device *dev, + struct encx24j600_context *ctx); /* Single-byte instructions */ #define BANK_SELECT(bank) (0xC0 | ((bank & (BANK_MASK >> BANK_SHIFT)) << 1)) diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 295b5176bcf1..374e691b11da 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -518,8 +518,10 @@ static int moxart_mac_probe(struct platform_device *pdev) SET_NETDEV_DEV(ndev, &pdev->dev); ret = register_netdev(ndev); - if (ret) + if (ret) { + free_netdev(ndev); goto init_fail; + } netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n", __func__, ndev->irq, ndev->dev_addr); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 9ecb99a1de35..8ebf3611aba3 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -4051,7 +4051,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "invalid sram_size %dB or board span %ldB\n", mgp->sram_size, mgp->board_span); - status = -EINVAL; goto abort_with_ioremap; } memcpy_fromio(mgp->eeprom_strings, diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 58527a2ec455..122c2ee3dfe2 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -817,7 +817,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) printk(version); #endif - i = pcim_enable_device(pdev); + i = pci_enable_device(pdev); if (i) return i; /* natsemi has a non-standard PM control register @@ -850,7 +850,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) ioaddr = ioremap(iostart, 
iosize); if (!ioaddr) { i = -ENOMEM; - goto err_pci_request_regions; + goto err_ioremap; } /* Work around the dropped serial bit. */ @@ -968,6 +968,9 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) err_register_netdev: iounmap(ioaddr); + err_ioremap: + pci_release_regions(pdev); + err_pci_request_regions: free_netdev(dev); return i; @@ -3225,6 +3228,7 @@ static void natsemi_remove1(struct pci_dev *pdev) NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround); unregister_netdev (dev); + pci_release_regions (pdev); iounmap(ioaddr); free_netdev (dev); } diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index 9b041848b389..7007d212f3e4 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -128,7 +128,7 @@ static const struct net_device_ops xtsonic_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, }; -static int sonic_probe1(struct net_device *dev) +static int __init sonic_probe1(struct net_device *dev) { static unsigned version_printed = 0; unsigned int silicon_revision; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 395828830b57..9ba975853ec6 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -8625,7 +8625,7 @@ static void s2io_io_resume(struct pci_dev *pdev) return; } - if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) { + if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) { s2io_card_down(sp); pr_err("Can't restore mac addr after reset.\n"); return; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index c6950e580883..e0993eba5df3 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3539,13 +3539,13 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev) kfree(vdev->vpaths); + /* we are safe to free it 
now */ + free_netdev(dev); + vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", buf); vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, __func__, __LINE__); - - /* we are safe to free it now */ - free_netdev(dev); } /* diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 45cfb1a0933d..3b98b263bad0 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -124,7 +124,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) { u8 *data = skb->data; unsigned int offset; - u16 hi, id; + u16 *hi, *id; u32 lo; if (ptp_classify_raw(skb) == PTP_CLASS_NONE) @@ -135,11 +135,14 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid)) return 0; - hi = get_unaligned_be16(data + offset + OFF_PTP_SOURCE_UUID + 0); - lo = get_unaligned_be32(data + offset + OFF_PTP_SOURCE_UUID + 2); - id = get_unaligned_be16(data + offset + OFF_PTP_SEQUENCE_ID); + hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID); + id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID); - return (uid_hi == hi && uid_lo == lo && seqid == id); + memcpy(&lo, &hi[1], sizeof(lo)); + + return (uid_hi == *hi && + uid_lo == lo && + seqid == *id); } static void @@ -149,6 +152,7 @@ pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb) struct pci_dev *pdev; u64 ns; u32 hi, lo, val; + u16 uid, seq; if (!adapter->hwts_rx_en) return; @@ -164,7 +168,10 @@ pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb) lo = pch_src_uuid_lo_read(pdev); hi = pch_src_uuid_hi_read(pdev); - if (!pch_ptp_match(skb, hi, lo, hi >> 16)) + uid = hi & 0xffff; + seq = (hi >> 16) & 0xffff; + + if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq))) goto out; ns = pch_rx_snap_read(pdev); @@ -2618,13 +2625,9 @@ static int 
pch_gbe_probe(struct pci_dev *pdev, adapter->pdev = pdev; adapter->hw.back = adapter; adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR]; - adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data; - if (adapter->pdata && adapter->pdata->platform_init) { - ret = adapter->pdata->platform_init(pdev); - if (ret) - goto err_free_netdev; - } + if (adapter->pdata && adapter->pdata->platform_init) + adapter->pdata->platform_init(pdev); adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, PCI_DEVFN(12, 4)); @@ -2714,7 +2717,7 @@ err_free_netdev: */ static int pch_gbe_minnow_platform_init(struct pci_dev *pdev) { - unsigned long flags = GPIOF_OUT_INIT_HIGH; + unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT; unsigned gpio = MINNOW_PHY_RESET_GPIO; int ret; diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 56326a56068b..48106953cf64 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1129,20 +1129,16 @@ static int pasemi_mac_open(struct net_device *dev) mac->tx = pasemi_mac_setup_tx_resources(dev); - if (!mac->tx) { - ret = -ENOMEM; + if (!mac->tx) goto out_tx_ring; - } /* We might already have allocated rings in case mtu was changed * before interface was brought up. */ if (dev->mtu > 1500 && !mac->num_cs) { pasemi_mac_setup_csrings(mac); - if (!mac->num_cs) { - ret = -ENOMEM; + if (!mac->num_cs) goto out_tx_ring; - } } /* Zero out rmon counters */ diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index f89441f9bd8d..6409a06bbdf6 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -586,6 +586,11 @@ static const struct net_device_ops netxen_netdev_ops = { #endif }; +static inline bool netxen_function_zero(struct pci_dev *pdev) +{ + return (PCI_FUNC(pdev->devfn) == 0) ? 
true : false; +} + static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter, u32 mode) { @@ -681,7 +686,7 @@ static int netxen_setup_intr(struct netxen_adapter *adapter) netxen_initialize_interrupt_registers(adapter); netxen_set_msix_bit(pdev, 0); - if (adapter->portnum == 0) { + if (netxen_function_zero(pdev)) { if (!netxen_setup_msi_interrupts(adapter, num_msix)) netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE); else @@ -1616,8 +1621,6 @@ err_out_free_netdev: free_netdev(netdev); err_out_free_res: - if (NX_IS_REVISION_P3(pdev->revision)) - pci_disable_pcie_error_reporting(pdev); pci_release_regions(pdev); err_out_disable_pdev: diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 22c6eaaf3d9f..c677b69bbb0b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1918,7 +1918,6 @@ static void qede_sync_free_irqs(struct qede_dev *edev) } edev->int_info.used_cnt = 0; - edev->int_info.msix_cnt = 0; } static int qede_req_msix_irqs(struct qede_dev *edev) @@ -2342,6 +2341,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) err4: qede_sync_free_irqs(edev); + memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info)); err3: qede_napi_disable_remove(edev); err2: diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 147effc16316..f2cb77c3b199 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -115,7 +115,7 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev, value = readl(&port_regs->CommonRegs.semaphoreReg); if ((value & (sem_mask >> 16)) == sem_bits) return 0; - mdelay(1000); + ssleep(1); } while (--seconds); return -1; } @@ -155,7 +155,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) "driver lock acquired\n"); return 1; } - mdelay(1000); + ssleep(1); } while (++i < 10); netdev_err(qdev->ndev, "Timed out 
waiting for driver lock...\n"); @@ -3287,7 +3287,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) if ((value & ISP_CONTROL_SR) == 0) break; - mdelay(1000); + ssleep(1); } while ((--max_wait_time)); /* @@ -3323,7 +3323,7 @@ static int ql_adapter_reset(struct ql3_adapter *qdev) ispControlStatus); if ((value & ISP_CONTROL_FSR) == 0) break; - mdelay(1000); + ssleep(1); } while ((--max_wait_time)); } if (max_wait_time == 0) @@ -3491,19 +3491,20 @@ static int ql_adapter_up(struct ql3_adapter *qdev) spin_lock_irqsave(&qdev->hw_lock, hw_flags); - if (!ql_wait_for_drvr_lock(qdev)) { + err = ql_wait_for_drvr_lock(qdev); + if (err) { + err = ql_adapter_initialize(qdev); + if (err) { + netdev_err(ndev, "Unable to initialize adapter\n"); + goto err_init; + } + netdev_err(ndev, "Releasing driver lock\n"); + ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); + } else { netdev_err(ndev, "Could not acquire driver lock\n"); - err = -ENODEV; goto err_lock; } - err = ql_adapter_initialize(qdev); - if (err) { - netdev_err(ndev, "Unable to initialize adapter\n"); - goto err_init; - } - ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); set_bit(QL_ADAPTER_UP, &qdev->flags); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index f8846e0d0dd6..75ac5cc2fc23 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -1076,14 +1076,8 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter) sds_mbx_size = sizeof(struct qlcnic_sds_mbx); context_id = recv_ctx->context_id; num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS; - err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter, - QLCNIC_CMD_ADD_RCV_RINGS); - if (err) { - dev_err(&adapter->pdev->dev, - "Failed to alloc mbx args %d\n", err); - return err; - } - + ahw->hw_ops->alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_ADD_RCV_RINGS); cmd.req.arg[1] = 0 | (num_sds 
<< 8) | (context_id << 16); /* set up status rings, mbx 2-81 */ @@ -3162,10 +3156,8 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr); ret = QLCRD32(adapter, indirect_addr, &err); - if (err == -EIO) { - qlcnic_83xx_unlock_flash(adapter); + if (err == -EIO) return err; - } word = ret; *(u32 *)p_data = word; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 99fc0121da93..63ebc491057b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -1039,7 +1039,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) { skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE); if (!skb) - goto error; + break; qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); skb_put(skb, QLCNIC_ILB_PKT_SIZE); adapter->ahw->diag_cnt = 0; @@ -1063,7 +1063,6 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode) cnt++; } if (cnt != i) { -error: dev_err(&adapter->pdev->dev, "LB Test: failed, TX[%d], RX[%d]\n", i, cnt); if (mode != QLCNIC_ILB_MODE) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index eff587c6e9be..be41e4c77b65 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -440,6 +440,7 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1); msleep(20); + qlcnic_rom_unlock(adapter); /* big hammer don't reset CAM block on reset */ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 11274b7ea36c..1205f6f9c941 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2506,7 +2506,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) qlcnic_sriov_vf_register_map(ahw); break; default: - err = -EINVAL; goto err_out_free_hw_res; } @@ -2706,7 +2705,6 @@ err_out_free_hw_res: kfree(ahw); err_out_free_res: - pci_disable_pcie_error_reporting(pdev); pci_release_regions(pdev); err_out_disable_pdev: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 625336264a44..5174e0bd75d1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1426,7 +1426,6 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) { vfree(fw_dump->tmpl_hdr); - fw_dump->tmpl_hdr = NULL; if (qlcnic_83xx_md_check_extended_dump_capability(adapter)) extended = !qlcnic_83xx_extend_md_capab(adapter); @@ -1445,8 +1444,6 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) struct qlcnic_83xx_dump_template_hdr *hdr; hdr = fw_dump->tmpl_hdr; - if (!hdr) - return; hdr->drv_cap_mask = 0x1f; fw_dump->cap_mask = 0x1f; dev_info(&pdev->dev, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index aab2db76d9ed..017d8c2c8285 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h @@ -201,7 +201,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *, struct qlcnic_info *, u16); int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8); void qlcnic_sriov_free_vlans(struct qlcnic_adapter *); -int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *); +void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *); bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *); void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *, struct 
qlcnic_vf_info *, u16); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 03e24fcf87a8..ffa6885acfc8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -427,7 +427,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; - int i, num_vlans, ret; + int i, num_vlans; u16 *vlans; if (sriov->allowed_vlans) @@ -438,9 +438,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter, dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n", sriov->num_allowed_vlans); - ret = qlcnic_sriov_alloc_vlans(adapter); - if (ret) - return ret; + qlcnic_sriov_alloc_vlans(adapter); if (!sriov->any_vlan) return 0; @@ -2149,7 +2147,7 @@ static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter) return err; } -int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter) +void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter) { struct qlcnic_sriov *sriov = adapter->ahw->sriov; struct qlcnic_vf_info *vf; @@ -2159,11 +2157,7 @@ int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter) vf = &sriov->vf_info[i]; vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans, sizeof(*vf->sriov_vlans), GFP_KERNEL); - if (!vf->sriov_vlans) - return -ENOMEM; } - - return 0; } void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 238a0e58342f..afd687e5e779 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -598,9 +598,7 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, if (err) goto del_flr_queue; - err = qlcnic_sriov_alloc_vlans(adapter); - if (err) - goto 
del_flr_queue; + qlcnic_sriov_alloc_vlans(adapter); return err; diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 15a4a266824a..9a37247cf4b8 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -133,8 +133,6 @@ #define PHY_ST 0x8A /* PHY status register */ #define MAC_SM 0xAC /* MAC status machine */ #define MAC_SM_RST 0x0002 /* MAC status machine reset */ -#define MD_CSC 0xb6 /* MDC speed control register */ -#define MD_CSC_DEFAULT 0x0030 #define MAC_ID 0xBE /* Identifier register */ #define TX_DCNT 0x80 /* TX descriptor count */ @@ -371,9 +369,8 @@ static void r6040_reset_mac(struct r6040_private *lp) { void __iomem *ioaddr = lp->base; int limit = MAC_DEF_TIMEOUT; - u16 cmd, md_csc; + u16 cmd; - md_csc = ioread16(ioaddr + MD_CSC); iowrite16(MAC_RST, ioaddr + MCR1); while (limit--) { cmd = ioread16(ioaddr + MCR1); @@ -385,10 +382,6 @@ static void r6040_reset_mac(struct r6040_private *lp) iowrite16(MAC_SM_RST, ioaddr + MAC_SM); iowrite16(0, ioaddr + MAC_SM); mdelay(5); - - /* Restore MDIO clock frequency */ - if (md_csc != MD_CSC_DEFAULT) - iowrite16(md_csc, ioaddr + MD_CSC); } static void r6040_init_mac_regs(struct net_device *dev) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 55b0f11bf2a0..f3a685d3f649 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -2314,7 +2314,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) { switch(stringset) { case ETH_SS_STATS: - memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings)); + memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings)); break; } } diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 73fc8e9683b7..8413f93f5cd9 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2210,7 +2210,7 @@ static void sh_eth_get_strings(struct 
net_device *ndev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_STATS: - memcpy(data, sh_eth_gstrings_stats, + memcpy(data, *sh_eth_gstrings_stats, sizeof(sh_eth_gstrings_stats)); break; } @@ -2426,7 +2426,6 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) else txdesc->status |= cpu_to_edmac(mdp, TD_TACT); - wmb(); /* cur_tx must be incremented after TACT bit was set */ mdp->cur_tx++; if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp))) @@ -2508,9 +2507,9 @@ static int sh_eth_close(struct net_device *ndev) /* Free all the skbuffs in the Rx queue and the DMA buffer. */ sh_eth_ring_free(ndev); - mdp->is_opened = 0; + pm_runtime_put_sync(&mdp->pdev->dev); - pm_runtime_put(&mdp->pdev->dev); + mdp->is_opened = 0; return 0; } diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 895154550608..56cdc01c5847 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -2313,18 +2313,18 @@ static int __init sxgbe_cmdline_opt(char *str) char *opt; if (!str || !*str) - return 1; + return -EINVAL; while ((opt = strsep(&str, ",")) != NULL) { if (!strncmp(opt, "eee_timer:", 10)) { if (kstrtoint(opt + 10, 0, &eee_timer)) goto err; } } - return 1; + return 0; err: pr_err("%s: ERROR broken module parameter conversion\n", __func__); - return 1; + return -EINVAL; } __setup("sxgbeeth=", sxgbe_cmdline_opt); diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 8fce0c819a4b..3c17f274e802 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -378,17 +378,12 @@ fail1: return rc; } -/* Disable SRIOV and remove VFs - * If some VFs are attached to a guest (using Xen, only) nothing is - * done if force=false, and vports are freed if force=true (for the non - * attachedc ones, only) but SRIOV is not disabled and VFs are not - * removed in 
either case. - */ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) { struct pci_dev *dev = efx->pci_dev; - unsigned int vfs_assigned = pci_vfs_assigned(dev); - int rc = 0; + unsigned int vfs_assigned = 0; + + vfs_assigned = pci_vfs_assigned(dev); if (vfs_assigned && !force) { netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " @@ -398,12 +393,10 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) if (!vfs_assigned) pci_disable_sriov(dev); - else - rc = -EBUSY; efx_ef10_sriov_free_vf_vswitching(efx); efx->vf_count = 0; - return rc; + return 0; } int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs) @@ -422,6 +415,7 @@ int efx_ef10_sriov_init(struct efx_nic *efx) void efx_ef10_sriov_fini(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int i; int rc; if (!nic_data->vf) { @@ -431,7 +425,14 @@ void efx_ef10_sriov_fini(struct efx_nic *efx) return; } - /* Disable SRIOV and remove any VFs in the host */ + /* Remove any VFs in the host */ + for (i = 0; i < efx->vf_count; ++i) { + struct efx_nic *vf_efx = nic_data->vf[i].efx; + + if (vf_efx) + vf_efx->pci_dev->driver->remove(vf_efx->pci_dev); + } + rc = efx_ef10_pci_sriov_disable(efx, true); if (rc) netif_dbg(efx, drv, efx->net_dev, diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 9fe5d13402e0..dff5b56738d3 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -442,7 +442,7 @@ static int sis900_probe(struct pci_dev *pci_dev, #endif /* setup various bits in PCI command register */ - ret = pcim_enable_device(pci_dev); + ret = pci_enable_device(pci_dev); if(ret) return ret; i = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); @@ -468,7 +468,7 @@ static int sis900_probe(struct pci_dev *pci_dev, ioaddr = pci_iomap(pci_dev, 0, 0); if (!ioaddr) { ret = -ENOMEM; - goto err_out; + goto err_out_cleardev; } sis_priv = netdev_priv(net_dev); @@ -576,6 +576,8 @@ 
err_unmap_tx: sis_priv->tx_ring_dma); err_out_unmap: pci_iounmap(pci_dev, ioaddr); +err_out_cleardev: + pci_release_regions(pci_dev); err_out: free_netdev(net_dev); return ret; @@ -2423,6 +2425,7 @@ static void sis900_remove(struct pci_dev *pci_dev) sis_priv->tx_ring_dma); pci_iounmap(pci_dev, sis_priv->ioaddr); free_netdev(net_dev); + pci_release_regions(pci_dev); } #ifdef CONFIG_PM diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 092267d8a86d..0be9c74238fd 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -2057,11 +2057,6 @@ static int smc911x_drv_probe(struct platform_device *pdev) ndev->dma = (unsigned char)-1; ndev->irq = platform_get_irq(pdev, 0); - if (ndev->irq < 0) { - ret = ndev->irq; - goto release_both; - } - lp = netdev_priv(ndev); lp->netdev = ndev; #ifdef SMC_DYNAMIC_BUS_CONFIG diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index f1eb9f99076a..ee5a7c05a0e6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -361,8 +361,6 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) plat_dat->bsp_priv = gmac; plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; plat_dat->multicast_filter_bins = 0; - plat_dat->tx_fifo_size = 8192; - plat_dat->rx_fifo_size = 8192; return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index d9cbc5dcf3ec..31ab5e749e66 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -39,7 +39,7 @@ struct sunxi_priv_data { static int sun7i_gmac_init(struct platform_device *pdev, void *priv) { struct sunxi_priv_data *gmac = priv; - int ret = 0; + int ret; if (gmac->regulator) { ret = 
regulator_enable(gmac->regulator); @@ -60,11 +60,11 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv) } else { clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE); ret = clk_prepare(gmac->tx_clk); - if (ret && gmac->regulator) - regulator_disable(gmac->regulator); + if (ret) + return ret; } - return ret; + return 0; } static void sun7i_gmac_exit(struct platform_device *pdev, void *priv) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index db2a341ae4b3..b3fe0575ff6b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -83,10 +83,10 @@ enum power_event { #define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */ /* GMAC HW ADDR regs */ -#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \ - 0x00000040 + (reg * 8)) -#define GMAC_ADDR_LOW(reg) ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \ - 0x00000044 + (reg * 8)) +#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \ + (reg * 8)) +#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 
0x00000804 : 0x00000044) + \ + (reg * 8)) #define GMAC_MAX_PERFECT_ADDRESSES 1 /* PCS registers (AN/TBI/SGMII/RGMII) offset */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 76d956de23ac..6f695239e658 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1897,6 +1897,9 @@ static int stmmac_release(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + if (priv->eee_enabled) + del_timer_sync(&priv->eee_ctrl_timer); + /* Stop and disconnect the PHY */ if (priv->phydev) { phy_stop(priv->phydev); @@ -1917,11 +1920,6 @@ static int stmmac_release(struct net_device *dev) if (priv->lpi_irq > 0) free_irq(priv->lpi_irq, dev); - if (priv->eee_enabled) { - priv->tx_path_in_lpi_mode = false; - del_timer_sync(&priv->eee_ctrl_timer); - } - /* Stop TX/RX DMA and clear the descriptors */ priv->hw->dma->stop_tx(priv->ioaddr); priv->hw->dma->stop_rx(priv->ioaddr); @@ -3070,11 +3068,6 @@ int stmmac_suspend(struct net_device *ndev) napi_disable(&priv->napi); - if (priv->eee_enabled) { - priv->tx_path_in_lpi_mode = false; - del_timer_sync(&priv->eee_ctrl_timer); - } - /* Stop TX/RX DMA */ priv->hw->dma->stop_tx(priv->ioaddr); priv->hw->dma->stop_rx(priv->ioaddr); @@ -3165,7 +3158,7 @@ static int __init stmmac_cmdline_opt(char *str) char *opt; if (!str || !*str) - return 1; + return -EINVAL; while ((opt = strsep(&str, ",")) != NULL) { if (!strncmp(opt, "debug:", 6)) { if (kstrtoint(opt + 6, 0, &debug)) @@ -3202,11 +3195,11 @@ static int __init stmmac_cmdline_opt(char *str) goto err; } } - return 1; + return 0; err: pr_err("%s: ERROR broken module parameter conversion", __func__); - return 1; + return -EINVAL; } __setup("stmmaceth=", stmmac_cmdline_opt); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 7354ad25252d..85f3a2c0d4dd 100644 --- a/drivers/net/ethernet/sun/niu.c +++ 
b/drivers/net/ethernet/sun/niu.c @@ -3948,6 +3948,8 @@ static void niu_xmac_interrupt(struct niu *np) mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; + if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) + mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) @@ -8166,10 +8168,10 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end) "VPD_SCAN: Reading in property [%s] len[%d]\n", namebuf, prop_len); for (i = 0; i < prop_len; i++) { - err = niu_pci_eeprom_read(np, off + i); - if (err < 0) - return err; - *prop_buf++ = err; + err = niu_pci_eeprom_read(np, off + i); + if (err >= 0) + *prop_buf = err; + ++prop_buf; } } @@ -8180,14 +8182,14 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end) } /* ESPC_PIO_EN_ENABLE must be set */ -static int niu_pci_vpd_fetch(struct niu *np, u32 start) +static void niu_pci_vpd_fetch(struct niu *np, u32 start) { u32 offset; int err; err = niu_pci_eeprom_read16_swp(np, start + 1); if (err < 0) - return err; + return; offset = err + 3; @@ -8196,14 +8198,12 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start) u32 end; err = niu_pci_eeprom_read(np, here); - if (err < 0) - return err; if (err != 0x90) - return -EINVAL; + return; err = niu_pci_eeprom_read16_swp(np, here + 1); if (err < 0) - return err; + return; here = start + offset + 3; end = start + offset + err; @@ -8211,13 +8211,9 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start) offset += err; err = niu_pci_vpd_scan_props(np, here, end); - if (err < 0) - return err; - /* ret == 1 is not an error */ - if (err == 1) - return 0; + if (err < 0 || err == 1) + return; } - return 0; } /* ESPC_PIO_EN_ENABLE must be set */ @@ -9310,11 +9306,8 @@ static int niu_get_invariants(struct niu *np) offset = niu_pci_vpd_offset(np); netif_printk(np, probe, KERN_DEBUG, np->dev, 
"%s() VPD offset [%08x]\n", __func__, offset); - if (offset) { - err = niu_pci_vpd_fetch(np, offset); - if (err < 0) - return err; - } + if (offset) + niu_pci_vpd_fetch(np, offset); nw64(ESPC_PIO_EN, 0); if (np->flags & NIU_FLAGS_VPD_VALID) { diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 19c832aaecf0..14c9d1baa85c 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -2068,7 +2068,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /*bdx_hw_reset(priv); */ if (bdx_read_mac(priv)) { pr_err("load MAC address failed\n"); - err = -EFAULT; goto err_out_iomap; } SET_NETDEV_DEV(ndev, &pdev->dev); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index ac61d017a4b5..e4c4747bdf32 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -183,11 +183,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; /* EMAC mac_status register */ #define EMAC_MACSTATUS_TXERRCODE_MASK (0xF00000) #define EMAC_MACSTATUS_TXERRCODE_SHIFT (20) -#define EMAC_MACSTATUS_TXERRCH_MASK (0x70000) +#define EMAC_MACSTATUS_TXERRCH_MASK (0x7) #define EMAC_MACSTATUS_TXERRCH_SHIFT (16) #define EMAC_MACSTATUS_RXERRCODE_MASK (0xF000) #define EMAC_MACSTATUS_RXERRCODE_SHIFT (12) -#define EMAC_MACSTATUS_RXERRCH_MASK (0x700) +#define EMAC_MACSTATUS_RXERRCH_MASK (0x7) #define EMAC_MACSTATUS_RXERRCH_SHIFT (8) /* EMAC RX register masks */ @@ -555,20 +555,8 @@ static int emac_set_coalesce(struct net_device *ndev, u32 int_ctrl, num_interrupts = 0; u32 prescale = 0, addnl_dvdr = 1, coal_intvl = 0; - if (!coal->rx_coalesce_usecs) { - priv->coal_intvl = 0; - - switch (priv->version) { - case EMAC_VERSION_2: - emac_ctrl_write(EMAC_DM646X_CMINTCTRL, 0); - break; - default: - emac_ctrl_write(EMAC_CTRL_EWINTTCNT, 0); - break; - } - - return 0; - } + if (!coal->rx_coalesce_usecs) + return -EINVAL; coal_intvl = 
coal->rx_coalesce_usecs; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 463c6db67146..37b9b39192ec 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1284,9 +1284,9 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe) tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id, KNAV_QUEUE_SHARED); if (IS_ERR(tx_pipe->dma_queue)) { - ret = PTR_ERR(tx_pipe->dma_queue); dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n", name, ret); + ret = PTR_ERR(tx_pipe->dma_queue); goto err; } diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index bc1638b0073f..399a89f30826 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -313,8 +313,9 @@ static void tlan_remove_one(struct pci_dev *pdev) pci_release_regions(pdev); #endif - cancel_work_sync(&priv->tlan_tqueue); free_netdev(dev); + + cancel_work_sync(&priv->tlan_tqueue); } static void tlan_start(struct net_device *dev) diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 74092c0eeafd..ed6a88cf3281 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -735,9 +735,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* Kick off the transfer */ lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */ - if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) - netif_stop_queue(ndev); - return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index c15283af2bae..44870fc37f54 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -671,7 +671,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) num_frag = skb_shinfo(skb)->nr_frags; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; - if 
(axienet_check_tx_bd_space(lp, num_frag + 1)) { + if (axienet_check_tx_bd_space(lp, num_frag)) { if (netif_queue_stopped(ndev)) return NETDEV_TX_BUSY; @@ -681,7 +681,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) smp_mb(); /* Space might have just been freed - check again */ - if (axienet_check_tx_bd_space(lp, num_frag + 1)) + if (axienet_check_tx_bd_space(lp, num_frag)) return NETDEV_TX_BUSY; netif_wake_queue(ndev); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 26cd42bfef0c..909a008f9927 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1180,8 +1180,9 @@ static int xemaclite_of_probe(struct platform_device *ofdev) } dev_info(dev, - "Xilinx EmacLite at 0x%08X mapped to 0x%p, irq=%d\n", - (unsigned int __force)ndev->mem_start, lp->base_addr, ndev->irq); + "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n", + (unsigned int __force)ndev->mem_start, + (unsigned int __force)lp->base_addr, ndev->irq); return 0; error: diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig index ecebeeb9b2a0..3a424c864f4d 100644 --- a/drivers/net/fddi/Kconfig +++ b/drivers/net/fddi/Kconfig @@ -28,20 +28,17 @@ config DEFXX config DEFXX_MMIO bool - prompt "Use MMIO instead of IOP" if PCI || EISA + prompt "Use MMIO instead of PIO" if PCI || EISA depends on DEFXX - default n if EISA + default n if PCI || EISA default y ---help--- This instructs the driver to use EISA or PCI memory-mapped I/O - (MMIO) as appropriate instead of programmed I/O ports (IOP). + (MMIO) as appropriate instead of programmed I/O ports (PIO). Enabling this gives an improvement in processing time in parts - of the driver, but it requires a memory window to be configured - for EISA (DEFEA) adapters that may not always be available. 
- Conversely some PCIe host bridges do not support IOP, so MMIO - may be required to access PCI (DEFPA) adapters on downstream PCI - buses with some systems. TURBOchannel does not have the concept - of I/O ports, so MMIO is always used for these (DEFTA) adapters. + of the driver, but it may cause problems with EISA (DEFEA) + adapters. TURBOchannel does not have the concept of I/O ports, + so MMIO is always used for these (DEFTA) adapters. If unsure, say N. diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index ebba57988f0a..7f975a2c8990 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -495,25 +495,6 @@ static const struct net_device_ops dfx_netdev_ops = { .ndo_set_mac_address = dfx_ctl_set_mac_address, }; -static void dfx_register_res_alloc_err(const char *print_name, bool mmio, - bool eisa) -{ - pr_err("%s: Cannot use %s, no address set, aborting\n", - print_name, mmio ? "MMIO" : "I/O"); - pr_err("%s: Recompile driver with \"CONFIG_DEFXX_MMIO=%c\"\n", - print_name, mmio ? 'n' : 'y'); - if (eisa && mmio) - pr_err("%s: Or run ECU and set adapter's MMIO location\n", - print_name); -} - -static void dfx_register_res_err(const char *print_name, bool mmio, - unsigned long start, unsigned long len) -{ - pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n", - print_name, mmio ? 
"MMIO" : "I/O", len, start); -} - /* * ================ * = dfx_register = @@ -587,12 +568,15 @@ static int dfx_register(struct device *bdev) dev_set_drvdata(bdev, dev); dfx_get_bars(bdev, bar_start, bar_len); - if (bar_len[0] == 0 || - (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) { - dfx_register_res_alloc_err(print_name, dfx_use_mmio, - dfx_bus_eisa); + if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) { + pr_err("%s: Cannot use MMIO, no address set, aborting\n", + print_name); + pr_err("%s: Run ECU and set adapter's MMIO location\n", + print_name); + pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\"" + "\n", print_name); err = -ENXIO; - goto err_out_disable; + goto err_out; } if (dfx_use_mmio) @@ -601,16 +585,18 @@ static int dfx_register(struct device *bdev) else region = request_region(bar_start[0], bar_len[0], print_name); if (!region) { - dfx_register_res_err(print_name, dfx_use_mmio, - bar_start[0], bar_len[0]); + pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, " + "aborting\n", dfx_use_mmio ? 
"MMIO" : "I/O", print_name, + (long)bar_len[0], (long)bar_start[0]); err = -EBUSY; goto err_out_disable; } if (bar_start[1] != 0) { region = request_region(bar_start[1], bar_len[1], print_name); if (!region) { - dfx_register_res_err(print_name, 0, - bar_start[1], bar_len[1]); + pr_err("%s: Cannot reserve I/O resource " + "0x%lx @ 0x%lx, aborting\n", print_name, + (long)bar_len[1], (long)bar_start[1]); err = -EBUSY; goto err_out_csr_region; } @@ -618,8 +604,9 @@ static int dfx_register(struct device *bdev) if (bar_start[2] != 0) { region = request_region(bar_start[2], bar_len[2], print_name); if (!region) { - dfx_register_res_err(print_name, 0, - bar_start[2], bar_len[2]); + pr_err("%s: Cannot reserve I/O resource " + "0x%lx @ 0x%lx, aborting\n", print_name, + (long)bar_len[2], (long)bar_start[2]); err = -EBUSY; goto err_out_bh_region; } diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 908de9e45394..732c68ed166a 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -68,9 +68,9 @@ #define SIXP_DAMA_OFF 0 /* default level 2 parameters */ -#define SIXP_TXDELAY 25 /* 250 ms */ +#define SIXP_TXDELAY (HZ/4) /* in 1 s */ #define SIXP_PERSIST 50 /* in 256ths */ -#define SIXP_SLOTTIME 10 /* 100 ms */ +#define SIXP_SLOTTIME (HZ/10) /* in 1 s */ #define SIXP_INIT_RESYNC_TIMEOUT (3*HZ/2) /* in 1 s */ #define SIXP_RESYNC_TIMEOUT 5*HZ /* in 1 s */ @@ -874,12 +874,6 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte) return; } - if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) { - pr_err("6pack: cooked buffer overrun, data loss\n"); - sp->rx_count = 0; - return; - } - buf = sp->raw_buf; sp->cooked_buf[sp->rx_count_cooked++] = buf[0] | ((buf[1] << 2) & 0xc0); diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 2d0090fb26f5..470d416f2b86 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -41,8 +41,6 @@ #define AX_MTU 236 -/* some arch define END as 
assembly function ending, just undef it */ -#undef END /* SLIP/KISS protocol characters. */ #define END 0300 /* indicates end of frame */ #define ESC 0333 /* indicates byte stuffing */ @@ -805,15 +803,13 @@ static void mkiss_close(struct tty_struct *tty) */ netif_stop_queue(ax->dev); - unregister_netdev(ax->dev); - - /* Free all AX25 frame buffers after unreg. */ + /* Free all AX25 frame buffers. */ kfree(ax->rbuff); kfree(ax->xbuff); ax->tty = NULL; - free_netdev(ax->dev); + unregister_netdev(ax->dev); } /* Perform I/O control on an active ax25 channel. */ diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index eb68a66538f7..d5e0e2aedc55 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -77,9 +77,7 @@ static int atusb_control_msg(struct atusb *atusb, unsigned int pipe, ret = usb_control_msg(usb_dev, pipe, request, requesttype, value, index, data, size, timeout); - if (ret < size) { - ret = ret < 0 ? ret : -ENODATA; - + if (ret < 0) { atusb->err = ret; dev_err(&usb_dev->dev, "atusb_control_msg: req 0x%02x val 0x%x idx 0x%x, error %d\n", @@ -342,7 +340,6 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n) return -ENOMEM; } usb_anchor_urb(urb, &atusb->idle_urbs); - usb_free_urb(urb); n--; } return 0; @@ -569,9 +566,9 @@ static int atusb_get_and_show_build(struct atusb *atusb) if (!build) return -ENOMEM; - /* We cannot call atusb_control_msg() here, since this request may read various length data */ - ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD, - ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000); + ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), + ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0, + build, ATUSB_BUILD_SIZE, 1000); if (ret >= 0) { build[ret] = 0; dev_info(&usb_dev->dev, "Firmware: build %s\n", build); diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h index f7ad94773d81..e7fe41117003 100644 --- 
a/drivers/net/phy/dp83640_reg.h +++ b/drivers/net/phy/dp83640_reg.h @@ -4,7 +4,7 @@ #ifndef HAVE_DP83640_REGISTERS #define HAVE_DP83640_REGISTERS -/* #define PAGE0 0x0000 */ +#define PAGE0 0x0000 #define PHYCR2 0x001c /* PHY Control Register 2 */ #define PAGE4 0x0004 diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 2fb95cca3318..ccefba7af960 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -264,13 +264,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) bus->dev.groups = NULL; dev_set_name(&bus->dev, "%s", bus->id); - /* We need to set state to MDIOBUS_UNREGISTERED to correctly release - * the device in mdiobus_free() - * - * State will be updated later in this function in case of success - */ - bus->state = MDIOBUS_UNREGISTERED; - err = device_register(&bus->dev); if (err) { pr_err("mii_bus %s failed to register\n", bus->id); @@ -295,7 +288,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) } bus->state = MDIOBUS_REGISTERED; - dev_dbg(&bus->dev, "probed\n"); + pr_info("%s: probed\n", bus->name); return 0; error: @@ -315,8 +308,7 @@ void mdiobus_unregister(struct mii_bus *bus) { int i; - if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED)) - return; + BUG_ON(bus->state != MDIOBUS_REGISTERED); bus->state = MDIOBUS_UNREGISTERED; for (i = 0; i < PHY_MAX_ADDR; i++) { diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 679b14759379..46448d7e3290 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -70,8 +70,6 @@ #define MPHDRLEN 6 /* multilink protocol header length */ #define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ -#define PPP_PROTO_LEN 2 - /* * An instance of /dev/ppp can be associated with either a ppp * interface unit or a ppp channel. 
In both cases, file->private_data @@ -489,9 +487,6 @@ static ssize_t ppp_write(struct file *file, const char __user *buf, if (!pf) return -ENXIO; - /* All PPP packets should start with the 2-byte protocol */ - if (count < PPP_PROTO_LEN) - return -EINVAL; ret = -ENOMEM; skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL); if (!skb) @@ -1298,7 +1293,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) } ++ppp->stats64.tx_packets; - ppp->stats64.tx_bytes += skb->len - PPP_PROTO_LEN; + ppp->stats64.tx_bytes += skb->len - 2; switch (proto) { case PPP_IP: diff --git a/drivers/net/tun.c b/drivers/net/tun.c index d334e740b013..93c7024fcc67 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -71,14 +71,6 @@ #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include #include @@ -821,7 +813,6 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); int txq = skb->queue_mapping; - struct netdev_queue *queue; struct tun_file *tfile; u32 numqueues = 0; @@ -885,10 +876,6 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) /* Enqueue packet */ skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb); - /* NETIF_F_LLTX requires to do our own update of trans_start */ - queue = netdev_get_tx_queue(dev, txq); - queue->trans_start = jiffies; - /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) kill_fasync(&tfile->fasync, SIGIO, POLL_IN); @@ -1902,45 +1889,6 @@ unlock: return ret; } -/* Return correct value for tun->dev->addr_len based on tun->dev->type. 
*/ -static unsigned char tun_get_addr_len(unsigned short type) -{ - switch (type) { - case ARPHRD_IP6GRE: - case ARPHRD_TUNNEL6: - return sizeof(struct in6_addr); - case ARPHRD_IPGRE: - case ARPHRD_TUNNEL: - case ARPHRD_SIT: - return 4; - case ARPHRD_ETHER: - return ETH_ALEN; - case ARPHRD_IEEE802154: - case ARPHRD_IEEE802154_MONITOR: - return IEEE802154_EXTENDED_ADDR_LEN; - case ARPHRD_PHONET_PIPE: - case ARPHRD_PPP: - case ARPHRD_NONE: - return 0; - case ARPHRD_6LOWPAN: - return EUI64_ADDR_LEN; - case ARPHRD_FDDI: - return FDDI_K_ALEN; - case ARPHRD_HIPPI: - return HIPPI_ALEN; - case ARPHRD_IEEE802: - return FC_ALEN; - case ARPHRD_ROSE: - return ROSE_ADDR_LEN; - case ARPHRD_NETROM: - return AX25_ADDR_LEN; - case ARPHRD_LOCALTLK: - return LTALK_ALEN; - default: - return 0; - } -} - static long __tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg, int ifreq_len) { @@ -2085,7 +2033,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, ret = -EBUSY; } else { tun->dev->type = (int) arg; - tun->dev->addr_len = tun_get_addr_len(tun->dev->type); tun_debug(KERN_INFO, tun, "linktype set to %d\n", tun->dev->type); ret = 0; diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 9272d0f93819..3a7286256db0 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -98,10 +98,6 @@ config USB_RTL8150 config USB_RTL8152 tristate "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" select MII - select CRC32 - select CRYPTO - select CRYPTO_HASH - select CRYPTO_SHA256 help This option adds support for Realtek RTL8152 based USB 2.0 10/100 Ethernet adapters and RTL8153 based USB 3.0 10/100/1000 diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 5a2716360613..2dcc8a039d42 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -307,12 +307,12 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, int ret; if (2 == size) { - u16 buf = 0; + u16 
buf; ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0); le16_to_cpus(&buf); *((u16 *)data) = buf; } else if (4 == size) { - u32 buf = 0; + u32 buf; ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0); le32_to_cpus(&buf); *((u32 *)data) = buf; @@ -1369,69 +1369,59 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) u16 hdr_off; u32 *pkt_hdr; - /* At the end of the SKB, there's a header telling us how many packets - * are bundled into this buffer and where we can find an array of - * per-packet metadata (which contains elements encoded into u16). - */ - if (skb->len < 4) + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) return 0; + skb_trim(skb, skb->len - 4); memcpy(&rx_hdr, skb_tail_pointer(skb), 4); le32_to_cpus(&rx_hdr); + pkt_cnt = (u16)rx_hdr; hdr_off = (u16)(rx_hdr >> 16); - - if (pkt_cnt == 0) - return 0; - - /* Make sure that the bounds of the metadata array are inside the SKB - * (and in front of the counter at the end). 
- */ - if (pkt_cnt * 2 + hdr_off > skb->len) - return 0; pkt_hdr = (u32 *)(skb->data + hdr_off); - /* Packets must not overlap the metadata array */ - skb_trim(skb, hdr_off); - - for (; ; pkt_cnt--, pkt_hdr++) { + while (pkt_cnt--) { u16 pkt_len; le32_to_cpus(pkt_hdr); pkt_len = (*pkt_hdr >> 16) & 0x1fff; - if (pkt_len > skb->len) - return 0; - /* Check CRC or runt packet */ - if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) && - pkt_len >= 2 + ETH_HLEN) { - bool last = (pkt_cnt == 0); - - if (last) { - ax_skb = skb; - } else { - ax_skb = skb_clone(skb, GFP_ATOMIC); - if (!ax_skb) - return 0; - } + if ((*pkt_hdr & AX_RXHDR_CRC_ERR) || + (*pkt_hdr & AX_RXHDR_DROP_ERR)) { + skb_pull(skb, (pkt_len + 7) & 0xFFF8); + pkt_hdr++; + continue; + } + + if (pkt_cnt == 0) { + skb->len = pkt_len; + /* Skip IP alignment pseudo header */ + skb_pull(skb, 2); + skb_set_tail_pointer(skb, skb->len); + skb->truesize = pkt_len + sizeof(struct sk_buff); + ax88179_rx_checksum(skb, pkt_hdr); + return 1; + } + + ax_skb = skb_clone(skb, GFP_ATOMIC); + if (ax_skb) { ax_skb->len = pkt_len; /* Skip IP alignment pseudo header */ skb_pull(ax_skb, 2); skb_set_tail_pointer(ax_skb, ax_skb->len); ax_skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(ax_skb, pkt_hdr); - - if (last) - return 1; - usbnet_skb_return(dev, ax_skb); + } else { + return 0; } - /* Trim this packet away from the SKB */ - if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8)) - return 0; + skb_pull(skb, (pkt_len + 7) & 0xFFF8); + pkt_hdr++; } + return 1; } static struct sk_buff * diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 84e0e7f78029..ff2270ead2e6 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -406,8 +406,6 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i err = register_netdev(dev); if (err) { - /* Set disconnected flag so that disconnect() returns early. 
*/ - pnd->disconnected = 1; usb_driver_release_interface(&usbpn_driver, data_intf); goto out; } diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c index 9c15e1a1261b..f7180f8db39e 100644 --- a/drivers/net/usb/cdc_eem.c +++ b/drivers/net/usb/cdc_eem.c @@ -138,10 +138,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb, } skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags); - dev_kfree_skb_any(skb); if (!skb2) return NULL; + dev_kfree_skb_any(skb); skb = skb2; done: diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 72411f0de54f..3707aab2423b 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -486,11 +486,6 @@ static const struct usb_device_id products[] = { .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE -#define ZAURUS_FAKE_INTERFACE \ - .bInterfaceClass = USB_CLASS_COMM, \ - .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ - .bInterfaceProtocol = USB_CDC_PROTO_NONE - /* SA-1100 based Sharp Zaurus ("collie"), or compatible; * wire-incompatible with true CDC Ethernet implementations. * (And, it seems, needlessly so...) 
@@ -544,13 +539,6 @@ static const struct usb_device_id products[] = { .idProduct = 0x9032, /* SL-6000 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, -}, { - .match_flags = USB_DEVICE_ID_MATCH_INT_INFO - | USB_DEVICE_ID_MATCH_DEVICE, - .idVendor = 0x04DD, - .idProduct = 0x9032, /* SL-6000 */ - ZAURUS_FAKE_INTERFACE, - .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 2c9e4ab99fe0..bab13ccfb085 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -175,8 +175,6 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx) /* clamp new_tx to sane values */ min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16); max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)); - if (max == 0) - max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */ /* some devices set dwNtbOutMaxSize too low for the above default */ min = min(min, max); @@ -1081,10 +1079,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) * accordingly. Otherwise, we should check here. 
*/ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) - delayed_ndp_size = ctx->max_ndp_size + - max_t(u32, - ctx->tx_ndp_modulus, - ctx->tx_modulus + ctx->tx_remainder) - 1; + delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus); else delayed_ndp_size = 0; @@ -1237,8 +1232,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && skb_out->len > ctx->min_tx_pkt) { padding_count = ctx->tx_max - skb_out->len; - if (!WARN_ON(padding_count > ctx->tx_max)) - memset(skb_put(skb_out, padding_count), 0, padding_count); + memset(skb_put(skb_out, padding_count), 0, padding_count); } else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) { *skb_put(skb_out, 1) = 0; /* force short packet */ @@ -1559,6 +1553,9 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE. */ + netif_info(dev, link, dev->net, + "network connection: %sconnected\n", + !!event->wValue ? 
"" : "dis"); usbnet_link_change(dev, !!event->wValue, 0); break; @@ -1582,7 +1579,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) static const struct driver_info cdc_ncm_info = { .description = "CDC NCM", .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET - | FLAG_LINK_INTR | FLAG_ETHER, + | FLAG_LINK_INTR, .bind = cdc_ncm_bind, .unbind = cdc_ncm_unbind, .manage_power = usbnet_manage_power, diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index b35a887ba594..efd4bf06f6ad 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -635,7 +635,7 @@ static struct hso_serial *get_serial_by_index(unsigned index) return serial; } -static int obtain_minor(struct hso_serial *serial) +static int get_free_serial_index(void) { int index; unsigned long flags; @@ -643,10 +643,8 @@ static int obtain_minor(struct hso_serial *serial) spin_lock_irqsave(&serial_table_lock, flags); for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) { if (serial_table[index] == NULL) { - serial_table[index] = serial->parent; - serial->minor = index; spin_unlock_irqrestore(&serial_table_lock, flags); - return 0; + return index; } } spin_unlock_irqrestore(&serial_table_lock, flags); @@ -655,12 +653,15 @@ static int obtain_minor(struct hso_serial *serial) return -1; } -static void release_minor(struct hso_serial *serial) +static void set_serial_by_index(unsigned index, struct hso_serial *serial) { unsigned long flags; spin_lock_irqsave(&serial_table_lock, flags); - serial_table[serial->minor] = NULL; + if (serial) + serial_table[index] = serial->parent; + else + serial_table[index] = NULL; spin_unlock_irqrestore(&serial_table_lock, flags); } @@ -1710,7 +1711,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty, spin_unlock_irqrestore(&serial->serial_lock, flags); return usb_control_msg(serial->parent->usb, - usb_sndctrlpipe(serial->parent->usb, 0), 0x22, + usb_rcvctrlpipe(serial->parent->usb, 0), 0x22, 0x21, val, if_num, NULL, 0, 
USB_CTRL_SET_TIMEOUT); } @@ -2248,7 +2249,6 @@ static int hso_stop_serial_device(struct hso_device *hso_dev) static void hso_serial_tty_unregister(struct hso_serial *serial) { tty_unregister_device(tty_drv, serial->minor); - release_minor(serial); } static void hso_serial_common_free(struct hso_serial *serial) @@ -2273,23 +2273,25 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, int rx_size, int tx_size) { struct device *dev; + int minor; int i; tty_port_init(&serial->port); - if (obtain_minor(serial)) + minor = get_free_serial_index(); + if (minor < 0) goto exit2; /* register our minor number */ serial->parent->dev = tty_port_register_device_attr(&serial->port, - tty_drv, serial->minor, &serial->parent->interface->dev, + tty_drv, minor, &serial->parent->interface->dev, serial->parent, hso_serial_dev_groups); - if (IS_ERR(serial->parent->dev)) { - release_minor(serial); + if (IS_ERR(serial->parent->dev)) goto exit2; - } dev = serial->parent->dev; + /* fill in specific data for later use */ + serial->minor = minor; serial->magic = HSO_SERIAL_MAGIC; spin_lock_init(&serial->serial_lock); serial->num_rx_urbs = num_urbs; @@ -2461,7 +2463,7 @@ static int hso_rfkill_set_block(void *data, bool blocked) if (hso_dev->usb_gone) rv = 0; else - rv = usb_control_msg(hso_dev->usb, usb_sndctrlpipe(hso_dev->usb, 0), + rv = usb_control_msg(hso_dev->usb, usb_rcvctrlpipe(hso_dev->usb, 0), enabled ? 
0x82 : 0x81, 0x40, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); mutex_unlock(&hso_dev->mutex); @@ -2522,7 +2524,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, hso_net_init); if (!net) { dev_err(&interface->dev, "Unable to create ethernet device\n"); - goto err_hso_dev; + goto exit; } hso_net = netdev_priv(net); @@ -2535,67 +2537,54 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, USB_DIR_IN); if (!hso_net->in_endp) { dev_err(&interface->dev, "Can't find BULK IN endpoint\n"); - goto err_net; + goto exit; } hso_net->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT); if (!hso_net->out_endp) { dev_err(&interface->dev, "Can't find BULK OUT endpoint\n"); - goto err_net; + goto exit; } SET_NETDEV_DEV(net, &interface->dev); SET_NETDEV_DEVTYPE(net, &hso_type); + /* registering our net device */ + result = register_netdev(net); + if (result) { + dev_err(&interface->dev, "Failed to register device\n"); + goto exit; + } + /* start allocating */ for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_rx_urb_pool[i]) { dev_err(&interface->dev, "Could not allocate rx urb\n"); - goto err_mux_bulk_rx; + goto exit; } hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_rx_buf_pool[i]) - goto err_mux_bulk_rx; + goto exit; } hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_tx_urb) { dev_err(&interface->dev, "Could not allocate tx urb\n"); - goto err_mux_bulk_rx; + goto exit; } hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_tx_buf) - goto err_free_tx_urb; + goto exit; add_net_device(hso_dev); - /* registering our net device */ - result = register_netdev(net); - if (result) { - dev_err(&interface->dev, "Failed to register device\n"); - goto err_free_tx_buf; - } - hso_log_port(hso_dev); 
hso_create_rfkill(hso_dev, interface); return hso_dev; - -err_free_tx_buf: - remove_net_device(hso_dev); - kfree(hso_net->mux_bulk_tx_buf); -err_free_tx_urb: - usb_free_urb(hso_net->mux_bulk_tx_urb); -err_mux_bulk_rx: - for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { - usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]); - kfree(hso_net->mux_bulk_rx_buf_pool[i]); - } -err_net: - free_netdev(net); -err_hso_dev: - kfree(hso_dev); +exit: + hso_free_net_device(hso_dev); return NULL; } @@ -2703,6 +2692,9 @@ static struct hso_device *hso_create_bulk_serial_device( serial->write_data = hso_std_serial_write_data; + /* and record this serial */ + set_serial_by_index(serial->minor, serial); + /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); @@ -2742,14 +2734,14 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface, serial = kzalloc(sizeof(*serial), GFP_KERNEL); if (!serial) - goto err_free_dev; + goto exit; hso_dev->port_data.dev_serial = serial; serial->parent = hso_dev; if (hso_serial_common_create (serial, 1, CTRL_URB_RX_SIZE, CTRL_URB_TX_SIZE)) - goto err_free_serial; + goto exit; serial->tx_data_length--; serial->write_data = hso_mux_serial_write_data; @@ -2759,15 +2751,20 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface, serial->shared_int->ref_count++; mutex_unlock(&serial->shared_int->shared_int_lock); + /* and record this serial */ + set_serial_by_index(serial->minor, serial); + /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); /* done, return it */ return hso_dev; -err_free_serial: - kfree(serial); -err_free_dev: +exit: + if (serial) { + tty_unregister_device(tty_drv, serial->minor); + kfree(serial); + } kfree(hso_dev); return NULL; @@ -3142,7 +3139,8 @@ static void hso_free_interface(struct usb_interface *interface) cancel_work_sync(&serial_table[i]->async_put_intf); cancel_work_sync(&serial_table[i]->async_get_intf); hso_serial_tty_unregister(serial); - 
kref_put(&serial->parent->ref, hso_serial_ref_free); + kref_put(&serial_table[i]->ref, hso_serial_ref_free); + set_serial_by_index(i, NULL); } } diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 52ed3da64f01..2b16a5fed9de 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -70,7 +70,7 @@ #define IPHETH_USBINTF_SUBCLASS 253 #define IPHETH_USBINTF_PROTO 1 -#define IPHETH_BUF_SIZE 1514 +#define IPHETH_BUF_SIZE 1516 #define IPHETH_IP_ALIGN 2 /* padding at front of URB */ #define IPHETH_TX_TIMEOUT (5 * HZ) @@ -173,7 +173,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone) if (tx_buf == NULL) goto free_rx_urb; - rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, + rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE, GFP_KERNEL, &rx_urb->transfer_dma); if (rx_buf == NULL) goto free_tx_buf; @@ -198,7 +198,7 @@ error_nomem: static void ipheth_free_urbs(struct ipheth_device *iphone) { - usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf, + usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf, iphone->rx_urb->transfer_dma); usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf, iphone->tx_urb->transfer_dma); @@ -371,7 +371,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags) usb_fill_bulk_urb(dev->rx_urb, udev, usb_rcvbulkpipe(udev, dev->bulk_in), - dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, + dev->rx_buf, IPHETH_BUF_SIZE, ipheth_rcvbulk_callback, dev); dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 08c5c68057da..1439863e9061 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -66,8 +66,6 @@ #define LAN7850_USB_PRODUCT_ID (0x7850) #define LAN78XX_EEPROM_MAGIC (0x78A5) #define LAN78XX_OTP_MAGIC (0x78F3) -#define AT29M2AF_USB_VENDOR_ID (0x07C9) -#define AT29M2AF_USB_PRODUCT_ID (0x0012) #define MII_READ 1 #define 
MII_WRITE 0 @@ -611,9 +609,11 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset, ret = lan78xx_read_raw_otp(dev, 0, 1, &sig); if (ret == 0) { - if (sig == OTP_INDICATOR_2) + if (sig == OTP_INDICATOR_1) + offset = offset; + else if (sig == OTP_INDICATOR_2) offset += 0x100; - else if (sig != OTP_INDICATOR_1) + else ret = -EINVAL; if (!ret) ret = lan78xx_read_raw_otp(dev, offset, length, data); @@ -2956,12 +2956,6 @@ static int lan78xx_probe(struct usb_interface *intf, dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1); - /* Reject broken descriptors. */ - if (dev->maxpacket == 0) { - ret = -ENODEV; - goto out3; - } - /* driver requires remote-wakeup capability during autosuspend. */ intf->needs_remote_wakeup = 1; @@ -3336,10 +3330,6 @@ static const struct usb_device_id products[] = { /* LAN7850 USB Gigabit Ethernet Device */ USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID), }, - { - /* ATM2-AF USB Gigabit Ethernet Device */ - USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID), - }, {}, }; MODULE_DEVICE_TABLE(usb, products); diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 95151b46f200..4f345bd4e6e2 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -121,16 +121,8 @@ static const char driver_name[] = "MOSCHIP usb-ethernet driver"; static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data) { - int ret; - - ret = usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ, - 0x0000, index, data, size); - if (ret < 0) - return ret; - else if (ret < size) - return -ENODATA; - - return ret; + return usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ, + 0x0000, index, data, size); } static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data) diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index a7b1df01e0f7..4f505eb4f422 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -755,16 +755,12 @@ static 
inline void disable_net_traffic(pegasus_t *pegasus) set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp); } -static inline int get_interrupt_interval(pegasus_t *pegasus) +static inline void get_interrupt_interval(pegasus_t *pegasus) { u16 data; u8 interval; - int ret; - - ret = read_eprom_word(pegasus, 4, &data); - if (ret < 0) - return ret; + read_eprom_word(pegasus, 4, &data); interval = data >> 8; if (pegasus->usb->speed != USB_SPEED_HIGH) { if (interval < 0x80) { @@ -779,8 +775,6 @@ static inline int get_interrupt_interval(pegasus_t *pegasus) } } pegasus->intr_interval = interval; - - return 0; } static void set_carrier(struct net_device *net) @@ -1196,9 +1190,7 @@ static int pegasus_probe(struct usb_interface *intf, | NETIF_MSG_PROBE | NETIF_MSG_LINK); pegasus->features = usb_dev_id[dev_index].private; - res = get_interrupt_interval(pegasus); - if (res) - goto out2; + get_interrupt_interval(pegasus); if (reset_mac(pegasus)) { dev_err(&intf->dev, "can't reset MAC\n"); res = -EIO; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 943dab8ef1e2..42303f3f1348 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -877,7 +877,6 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x1255, 4)}, {QMI_FIXED_INTF(0x19d2, 0x1256, 4)}, {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */ - {QMI_FIXED_INTF(0x19d2, 0x1275, 3)}, /* ZTE P685M */ {QMI_FIXED_INTF(0x19d2, 0x1401, 2)}, {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */ {QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, @@ -935,7 +934,6 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */ {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ - {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */ {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx 
(2 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ebf6d4cf09ea..27e9c089b2fc 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3159,10 +3159,9 @@ static int rtl8152_close(struct net_device *netdev) tp->rtl_ops.down(tp); mutex_unlock(&tp->control); - } - if (!res) usb_autopm_put_interface(tp->intf); + } free_all_mem(tp); @@ -3821,7 +3820,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_STATS: - memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings)); + memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings)); break; } } diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index a53823720fb6..b20b380d91bf 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -398,7 +398,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) reply_len = sizeof *phym; retval = rndis_query(dev, intf, u.buf, RNDIS_OID_GEN_PHYSICAL_MEDIUM, - reply_len, (void **)&phym, &reply_len); + 0, (void **) &phym, &reply_len); if (retval != 0 || !phym) { /* OID is optional so don't fail here. 
*/ phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED); @@ -615,11 +615,6 @@ static const struct usb_device_id products [] = { USB_DEVICE_AND_INTERFACE_INFO(0x1630, 0x0042, USB_CLASS_COMM, 2 /* ACM */, 0x0ff), .driver_info = (unsigned long) &rndis_poll_status_info, -}, { - /* Hytera Communications DMR radios' "Radio to PC Network" */ - USB_VENDOR_AND_INTERFACE_INFO(0x238b, - USB_CLASS_COMM, 2 /* ACM */, 0x0ff), - .driver_info = (unsigned long)&rndis_info, }, { /* RNDIS is MSFT's un-official variant of CDC ACM */ USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 9fe6a8d899b0..234febc6e1d9 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -1485,7 +1485,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) ret = smsc75xx_wait_ready(dev, 0); if (ret < 0) { netdev_warn(dev->net, "device not ready in smsc75xx_bind\n"); - goto free_pdata; + return ret; } smsc75xx_init_mac_address(dev); @@ -1494,7 +1494,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) ret = smsc75xx_reset(dev); if (ret < 0) { netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret); - goto cancel_work; + return ret; } dev->net->netdev_ops = &smsc75xx_netdev_ops; @@ -1503,13 +1503,6 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; return 0; - -cancel_work: - cancel_work_sync(&pdata->set_multicast); -free_pdata: - kfree(pdata); - dev->data[0] = 0; - return ret; } static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf) @@ -1519,6 +1512,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf) cancel_work_sync(&pdata->set_multicast); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); + pdata = NULL; dev->data[0] = 0; } } diff --git 
a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index f4c4df01874c..aadfe1d1c37e 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -409,7 +409,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) /* ignore the CRC length */ len = (skb->data[1] | (skb->data[2] << 8)) - 4; - if (len > ETH_FRAME_LEN || len > skb->len) + if (len > ETH_FRAME_LEN) return 0; /* the last packet of current skb */ diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 27c0ef702ae2..db178e921e5d 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1732,11 +1732,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) if (!dev->rx_urb_size) dev->rx_urb_size = dev->hard_mtu; dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1); - if (dev->maxpacket == 0) { - /* that is a broken device */ - status = -ENODEV; - goto out4; - } /* let userspace know we have a random address */ if (ether_addr_equal(net->dev_addr, node_id)) diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c index 3d126761044f..6aaa6eb9df72 100644 --- a/drivers/net/usb/zaurus.c +++ b/drivers/net/usb/zaurus.c @@ -268,11 +268,6 @@ static const struct usb_device_id products [] = { .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE -#define ZAURUS_FAKE_INTERFACE \ - .bInterfaceClass = USB_CLASS_COMM, \ - .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ - .bInterfaceProtocol = USB_CDC_PROTO_NONE - /* SA-1100 based Sharp Zaurus ("collie"), or compatible. 
*/ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO @@ -330,13 +325,6 @@ static const struct usb_device_id products [] = { .idProduct = 0x9032, /* SL-6000 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, -}, { - .match_flags = USB_DEVICE_ID_MATCH_INT_INFO - | USB_DEVICE_ID_MATCH_DEVICE, - .idVendor = 0x04DD, - .idProduct = 0x9032, /* SL-6000 */ - ZAURUS_FAKE_INTERFACE, - .driver_info = (unsigned long)&bogus_mdlm_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 345784274410..2759d386ade7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1372,16 +1372,14 @@ static int virtnet_set_channels(struct net_device *dev, get_online_cpus(); err = virtnet_set_queues(vi, queue_pairs); - if (err) { - put_online_cpus(); - goto err; + if (!err) { + netif_set_real_num_tx_queues(dev, queue_pairs); + netif_set_real_num_rx_queues(dev, queue_pairs); + + virtnet_set_affinity(vi); } - virtnet_set_affinity(vi); put_online_cpus(); - netif_set_real_num_tx_queues(dev, queue_pairs); - netif_set_real_num_rx_queues(dev, queue_pairs); -err: return err; } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 474ee7e723d0..419c045d0752 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -3389,6 +3389,7 @@ vmxnet3_suspend(struct device *device) vmxnet3_free_intr_resources(adapter); netif_device_detach(netdev); + netif_tx_stop_all_queues(netdev); /* Create wake-up filters. 
*/ pmConf = adapter->pm_conf; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 4d44ec5b7cd7..50ede6b8b874 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1549,7 +1549,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) struct neighbour *n; struct inet6_dev *in6_dev; - rcu_read_lock(); in6_dev = __in6_dev_get(dev); if (!in6_dev) goto out; @@ -1606,7 +1605,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) } out: - rcu_read_unlock(); consume_skb(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index e38a8dca24cb..473a9b8ec9ba 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -572,13 +572,6 @@ static void ppp_timer(unsigned long arg) unsigned long flags; spin_lock_irqsave(&ppp->lock, flags); - /* mod_timer could be called after we entered this function but - * before we got the lock. - */ - if (timer_pending(&proto->timer)) { - spin_unlock_irqrestore(&ppp->lock, flags); - return; - } switch (proto->state) { case STOPPING: case REQ_SENT: diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 24daa1d0e9c5..ef746ba74ab4 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -56,8 +56,6 @@ struct lapbethdev { struct list_head node; struct net_device *ethdev; /* link to ethernet device */ struct net_device *axdev; /* lapbeth device (lapb#) */ - bool up; - spinlock_t up_lock; /* Protects "up" */ }; static LIST_HEAD(lapbeth_devices); @@ -105,9 +103,8 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe rcu_read_lock(); lapbeth = lapbeth_get_x25_dev(dev); if (!lapbeth) - goto drop_unlock_rcu; - spin_lock_bh(&lapbeth->up_lock); - if (!lapbeth->up) + goto drop_unlock; + if (!netif_running(lapbeth->axdev)) goto drop_unlock; len = skb->data[0] + skb->data[1] * 256; @@ -122,14 +119,11 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct 
packe goto drop_unlock; } out: - spin_unlock_bh(&lapbeth->up_lock); rcu_read_unlock(); return 0; drop_unlock: kfree_skb(skb); goto out; -drop_unlock_rcu: - rcu_read_unlock(); drop: kfree_skb(skb); return 0; @@ -157,11 +151,13 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, struct net_device *dev) { - struct lapbethdev *lapbeth = netdev_priv(dev); int err; - spin_lock_bh(&lapbeth->up_lock); - if (!lapbeth->up) + /* + * Just to be *really* sure not to send anything if the interface + * is down, the ethernet device may have gone. + */ + if (!netif_running(dev)) goto drop; /* There should be a pseudo header of 1 byte added by upper layers. @@ -192,7 +188,6 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, goto drop; } out: - spin_unlock_bh(&lapbeth->up_lock); return NETDEV_TX_OK; drop: kfree_skb(skb); @@ -284,7 +279,6 @@ static const struct lapb_register_struct lapbeth_callbacks = { */ static int lapbeth_open(struct net_device *dev) { - struct lapbethdev *lapbeth = netdev_priv(dev); int err; if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) { @@ -292,21 +286,15 @@ static int lapbeth_open(struct net_device *dev) return -ENODEV; } - spin_lock_bh(&lapbeth->up_lock); - lapbeth->up = true; - spin_unlock_bh(&lapbeth->up_lock); - + netif_start_queue(dev); return 0; } static int lapbeth_close(struct net_device *dev) { - struct lapbethdev *lapbeth = netdev_priv(dev); int err; - spin_lock_bh(&lapbeth->up_lock); - lapbeth->up = false; - spin_unlock_bh(&lapbeth->up_lock); + netif_stop_queue(dev); if ((err = lapb_unregister(dev)) != LAPB_OK) pr_err("lapb_unregister error: %d\n", err); @@ -365,9 +353,6 @@ static int lapbeth_new_device(struct net_device *dev) dev_hold(dev); lapbeth->ethdev = dev; - lapbeth->up = false; - spin_lock_init(&lapbeth->up_lock); - rc = -EIO; if (register_netdevice(ndev)) goto fail; diff --git a/drivers/net/wan/lmc/lmc_main.c 
b/drivers/net/wan/lmc/lmc_main.c index 88cf948ce8d4..c178e1218347 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -926,8 +926,6 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) break; default: printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name); - unregister_hdlc_device(dev); - return -EIO; break; } diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c index e8473047b2d1..dc6fe93ce71f 100644 --- a/drivers/net/wimax/i2400m/op-rfkill.c +++ b/drivers/net/wimax/i2400m/op-rfkill.c @@ -101,7 +101,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev, if (cmd == NULL) goto error_alloc; cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL); - cmd->hdr.length = cpu_to_le16(sizeof(cmd->sw_rf)); + cmd->hdr.length = sizeof(cmd->sw_rf); cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION); cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION); cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status)); diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index b107e3c2ad5d..3b2771ff2796 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -153,10 +153,6 @@ static void ar5523_cmd_rx_cb(struct urb *urb) ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START"); return; } - if (!cmd->odata) { - ar5523_err(ar, "Unexpected WDCMSG_TARGET_START reply"); - return; - } memcpy(cmd->odata, hdr + 1, sizeof(u32)); cmd->olen = sizeof(u32); cmd->res = 0; diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h index 135600405dd0..da7a7c8dafb2 100644 --- a/drivers/net/wireless/ath/ath.h +++ b/drivers/net/wireless/ath/ath.h @@ -199,13 +199,12 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common, bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr); void ath_hw_setbssidmask(struct ath_common *common); -void ath_key_delete(struct 
ath_common *common, u8 hw_key_idx); +void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key); int ath_key_config(struct ath_common *common, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key); bool ath_hw_keyreset(struct ath_common *common, u16 entry); -bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac); void ath_hw_cycle_counters_update(struct ath_common *common); int32_t ath_hw_get_listen_time(struct ath_common *common); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 60cc2b6b8be1..fdddb8c320b3 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3475,16 +3475,23 @@ bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) { struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; + int ret = 0; + + spin_lock_bh(&ar->data_lock); - if (skb_queue_len_lockless(q) >= ATH10K_MAX_NUM_MGMT_PENDING) { + if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) { ath10k_warn(ar, "wmi mgmt tx queue is full\n"); - return -ENOSPC; + ret = -ENOSPC; + goto unlock; } - skb_queue_tail(q, skb); + __skb_queue_tail(q, skb); ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); - return 0; +unlock: + spin_unlock_bh(&ar->data_lock); + + return ret; } static enum ath10k_mac_tx_path @@ -4992,7 +4999,6 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, if (arvif->nohwcrypt && !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { - ret = -EINVAL; ath10k_warn(ar, "cryptmode module param needed for sw crypto\n"); goto err; } diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 0f4836fc3b7c..16e052d02c94 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -522,7 +522,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } break; 
case DISABLE_KEY: - ath_key_delete(common, key->hw_key_idx); + ath_key_delete(common, key); break; default: ret = -EINVAL; diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index 3d0dfcf2c246..fc22c5f47927 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -340,11 +340,6 @@ static int ath6kl_usb_setup_pipe_resources(struct ath6kl_usb *ar_usb) le16_to_cpu(endpoint->wMaxPacketSize), endpoint->bInterval); } - - /* Ignore broken descriptors. */ - if (usb_endpoint_maxp(endpoint) == 0) - continue; - urbcount = 0; pipe_num = @@ -912,7 +907,7 @@ static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb, req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, buf, - size, 2000); + size, 2 * HZ); if (ret < 0) { ath6kl_warn("Failed to read usb control message: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index a5a7e0901bab..da3b47b87ecc 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -2508,10 +2508,8 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx) goto free_data_skb; for (index = 0; index < num_pri_streams; index++) { - if (WARN_ON(!data_sync_bufs[index].skb)) { - ret = -ENOMEM; + if (WARN_ON(!data_sync_bufs[index].skb)) goto free_data_skb; - } ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev, data_sync_bufs[index]. 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 96e1f54cccaf..c876dc2437b0 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3345,8 +3345,7 @@ found: "Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n", cptr, code, reference, length, major, minor); if ((!AR_SREV_9485(ah) && length >= 1024) || - (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485) || - (length > cptr)) { + (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) { ath_dbg(common, EEPROM, "Skipping bad header\n"); cptr -= COMP_HDR_LEN; continue; diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 70b5dbe9de6a..a660e40f2df1 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -178,8 +178,7 @@ struct ath_frame_info { s8 txq; u8 keyix; u8 rtscts_rate; - u8 retries : 6; - u8 dyn_smps : 1; + u8 retries : 7; u8 baw_tracked : 1; u8 tx_power; enum ath9k_key_type keytype:2; diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 15fb14f818f8..75072a8f8cf4 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -586,13 +586,6 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, return; } - if (pkt_len > 2 * MAX_RX_BUF_SIZE) { - dev_err(&hif_dev->udev->dev, - "ath9k_htc: invalid pkt_len (%x)\n", pkt_len); - RX_STAT_INC(skb_dropped); - return; - } - pad_len = 4 - (pkt_len & 0x3); if (pad_len == 4) pad_len = 0; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index e21df6eae634..9b66f4bb2fc3 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -246,7 +246,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset) if (unlikely(r)) { 
ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n", reg_offset, r); - return -1; + return -EIO; } return be32_to_cpu(val); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index e32e35b59829..83103d25bc91 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1463,7 +1463,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw, } break; case DISABLE_KEY: - ath_key_delete(common, key->hw_key_idx); + ath_key_delete(common, key); break; default: ret = -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 25c3e5d3fe62..d50e2e8bd998 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -285,7 +285,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah) srev = REG_READ(ah, AR_SREV); - if (srev == -1) { + if (srev == -EIO) { ath_err(ath9k_hw_common(ah), "Failed to read SREV register"); return false; diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 52afc1506cf5..831a54415a25 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -814,7 +814,6 @@ struct ath_hw { struct ath9k_pacal_info pacal_info; struct ar5416Stats stats; struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES]; - DECLARE_BITMAP(pending_del_keymap, ATH_KEYMAX); enum ath9k_int imask; u32 imrs2_reg; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 4592c4199807..f1adf4ec85da 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -302,11 +302,6 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) hchan = ah->curchan; } - if (!hchan) { - fastcc = false; - hchan = ath9k_cmn_get_channel(sc->hw, ah, &sc->cur_chan->chandef); - } - if (!ath_prepare_reset(sc)) fastcc = false; @@ -528,10 +523,8 @@ 
irqreturn_t ath_isr(int irq, void *dev) ath9k_debug_sync_cause(sc, sync_cause); status &= ah->imask; /* discard unasked-for bits */ - if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) { - ath9k_hw_kill_interrupts(sc->sc_ah); + if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) return IRQ_HANDLED; - } /* * If there are no status bits set, then this interrupt was not @@ -821,80 +814,12 @@ exit: ieee80211_free_txskb(hw, skb); } -static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix) -{ - struct ath_buf *bf; - struct ieee80211_tx_info *txinfo; - struct ath_frame_info *fi; - - list_for_each_entry(bf, txq_list, list) { - if (bf->bf_state.stale || !bf->bf_mpdu) - continue; - - txinfo = IEEE80211_SKB_CB(bf->bf_mpdu); - fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0]; - if (fi->keyix == keyix) - return true; - } - - return false; -} - -static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix) -{ - struct ath_hw *ah = sc->sc_ah; - int i; - struct ath_txq *txq; - bool key_in_use = false; - - for (i = 0; !key_in_use && i < ATH9K_NUM_TX_QUEUES; i++) { - if (!ATH_TXQ_SETUP(sc, i)) - continue; - txq = &sc->tx.txq[i]; - if (!txq->axq_depth) - continue; - if (!ath9k_hw_numtxpending(ah, txq->axq_qnum)) - continue; - - ath_txq_lock(sc, txq); - key_in_use = ath9k_txq_list_has_key(&txq->axq_q, keyix); - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { - int idx = txq->txq_tailidx; - - while (!key_in_use && - !list_empty(&txq->txq_fifo[idx])) { - key_in_use = ath9k_txq_list_has_key( - &txq->txq_fifo[idx], keyix); - INCR(idx, ATH_TXFIFO_DEPTH); - } - } - ath_txq_unlock(sc, txq); - } - - return key_in_use; -} - -static void ath9k_pending_key_del(struct ath_softc *sc, u8 keyix) -{ - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - - if (!test_bit(keyix, ah->pending_del_keymap) || - ath9k_txq_has_key(sc, keyix)) - return; - - /* No more TXQ frames point to this key cache entry, so delete it. 
*/ - clear_bit(keyix, ah->pending_del_keymap); - ath_key_delete(common, keyix); -} - static void ath9k_stop(struct ieee80211_hw *hw) { struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); bool prev_idle; - int i; ath9k_deinit_channel_context(sc); @@ -960,14 +885,6 @@ static void ath9k_stop(struct ieee80211_hw *hw) spin_unlock_bh(&sc->sc_pcu_lock); - for (i = 0; i < ATH_KEYMAX; i++) - ath9k_pending_key_del(sc, i); - - /* Clear key cache entries explicitly to get rid of any potentially - * remaining keys. - */ - ath9k_cmn_init_crypto(sc->sc_ah); - ath9k_ps_restore(sc); sc->ps_idle = prev_idle; @@ -1599,11 +1516,12 @@ static void ath9k_del_ps_key(struct ath_softc *sc, { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_node *an = (struct ath_node *) sta->drv_priv; + struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key }; if (!an->ps_key) return; - ath_key_delete(common, an->ps_key); + ath_key_delete(common, &ps_key); an->ps_key = 0; an->key_idx[0] = 0; } @@ -1765,12 +1683,6 @@ static int ath9k_set_key(struct ieee80211_hw *hw, if (sta) an = (struct ath_node *)sta->drv_priv; - /* Delete pending key cache entries if no more frames are pointing to - * them in TXQs. - */ - for (i = 0; i < ATH_KEYMAX; i++) - ath9k_pending_key_del(sc, i); - switch (cmd) { case SET_KEY: if (sta) @@ -1800,15 +1712,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw, } break; case DISABLE_KEY: - if (ath9k_txq_has_key(sc, key->hw_key_idx)) { - /* Delay key cache entry deletion until there are no - * remaining TXQ frames pointing to this entry. 
- */ - set_bit(key->hw_key_idx, sc->sc_ah->pending_del_keymap); - ath_hw_keysetmac(common, key->hw_key_idx, NULL); - } else { - ath_key_delete(common, key->hw_key_idx); - } + ath_key_delete(common, key); if (an) { for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) { if (an->key_idx[i] != key->hw_key_idx) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index e8a76f15f373..e1061a144ec8 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1228,11 +1228,6 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, is_40, is_sgi, is_sp); if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC; - if (rix >= 8 && fi->dyn_smps) { - info->rates[i].RateFlags |= - ATH9K_RATESERIES_RTS_CTS; - info->flags |= ATH9K_TXDESC_CTSENA; - } info->txpower[i] = ath_get_rate_txpower(sc, bf, rix, is_40, false); @@ -2119,7 +2114,6 @@ static void setup_frame_info(struct ieee80211_hw *hw, fi->keyix = an->ps_key; else fi->keyix = ATH9K_TXKEYIX_INVALID; - fi->dyn_smps = sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC; fi->keytype = keytype; fi->framelen = framelen; fi->tx_power = txpower; diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig index 3fc87997fcb3..1a796e5f69ec 100644 --- a/drivers/net/wireless/ath/carl9170/Kconfig +++ b/drivers/net/wireless/ath/carl9170/Kconfig @@ -17,11 +17,13 @@ config CARL9170 config CARL9170_LEDS bool "SoftLED Support" - default y depends on CARL9170 - depends on MAC80211_LEDS + select MAC80211_LEDS + select LEDS_CLASS + select NEW_LEDS + default y help - This option is necessary, if you want your device's LEDs to blink. + This option is necessary, if you want your device' LEDs to blink Say Y, unless you need the LEDs for firmware debugging. 
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c index 2f4b79102a27..0835828ffed7 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c @@ -182,12 +182,10 @@ static void channel_detector_exit(struct dfs_pattern_detector *dpd, if (cd == NULL) return; list_del(&cd->head); - if (cd->detectors) { - for (i = 0; i < dpd->num_radar_types; i++) { - struct pri_detector *de = cd->detectors[i]; - if (de != NULL) - de->exit(de); - } + for (i = 0; i < dpd->num_radar_types; i++) { + struct pri_detector *de = cd->detectors[i]; + if (de != NULL) + de->exit(de); } kfree(cd->detectors); kfree(cd); diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c index 61b59a804e30..1816b4e7dc26 100644 --- a/drivers/net/wireless/ath/key.c +++ b/drivers/net/wireless/ath/key.c @@ -84,7 +84,8 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry) } EXPORT_SYMBOL(ath_hw_keyreset); -bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac) +static bool ath_hw_keysetmac(struct ath_common *common, + u16 entry, const u8 *mac) { u32 macHi, macLo; u32 unicast_flag = AR_KEYTABLE_VALID; @@ -124,7 +125,6 @@ bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac) return true; } -EXPORT_SYMBOL(ath_hw_keysetmac); static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry, const struct ath_keyval *k, @@ -581,38 +581,29 @@ EXPORT_SYMBOL(ath_key_config); /* * Delete Key. */ -void ath_key_delete(struct ath_common *common, u8 hw_key_idx) +void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key) { - /* Leave CCMP and TKIP (main key) configured to avoid disabling - * encryption for potentially pending frames already in a TXQ with the - * keyix pointing to this key entry. Instead, only clear the MAC address - * to prevent RX processing from using this key cache entry. 
- */ - if (test_bit(hw_key_idx, common->ccmp_keymap) || - test_bit(hw_key_idx, common->tkip_keymap)) - ath_hw_keysetmac(common, hw_key_idx, NULL); - else - ath_hw_keyreset(common, hw_key_idx); - if (hw_key_idx < IEEE80211_WEP_NKID) + ath_hw_keyreset(common, key->hw_key_idx); + if (key->hw_key_idx < IEEE80211_WEP_NKID) return; - clear_bit(hw_key_idx, common->keymap); - clear_bit(hw_key_idx, common->ccmp_keymap); - if (!test_bit(hw_key_idx, common->tkip_keymap)) + clear_bit(key->hw_key_idx, common->keymap); + clear_bit(key->hw_key_idx, common->ccmp_keymap); + if (key->cipher != WLAN_CIPHER_SUITE_TKIP) return; - clear_bit(hw_key_idx + 64, common->keymap); + clear_bit(key->hw_key_idx + 64, common->keymap); - clear_bit(hw_key_idx, common->tkip_keymap); - clear_bit(hw_key_idx + 64, common->tkip_keymap); + clear_bit(key->hw_key_idx, common->tkip_keymap); + clear_bit(key->hw_key_idx + 64, common->tkip_keymap); if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) { - ath_hw_keyreset(common, hw_key_idx + 32); - clear_bit(hw_key_idx + 32, common->keymap); - clear_bit(hw_key_idx + 64 + 32, common->keymap); + ath_hw_keyreset(common, key->hw_key_idx + 32); + clear_bit(key->hw_key_idx + 32, common->keymap); + clear_bit(key->hw_key_idx + 64 + 32, common->keymap); - clear_bit(hw_key_idx + 32, common->tkip_keymap); - clear_bit(hw_key_idx + 64 + 32, common->tkip_keymap); + clear_bit(key->hw_key_idx + 32, common->tkip_keymap); + clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap); } } EXPORT_SYMBOL(ath_key_delete); diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index f83b28f8f0f8..1fe727f2b41b 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -127,9 +127,7 @@ static struct ieee80211_supported_band wcn_band_2ghz = { .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40 | - IEEE80211_HT_CAP_LSIG_TXOP_PROT | - IEEE80211_HT_CAP_SGI_40 | - 
IEEE80211_HT_CAP_SUP_WIDTH_20_40, + IEEE80211_HT_CAP_LSIG_TXOP_PROT, .ht_supported = true, .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig index 2d571fd709c2..2ab6c5951561 100644 --- a/drivers/net/wireless/ath/wil6210/Kconfig +++ b/drivers/net/wireless/ath/wil6210/Kconfig @@ -1,7 +1,6 @@ config WIL6210 tristate "Wilocity 60g WiFi card wil6210 support" select WANT_DEV_COREDUMP - select CRC32 depends on CFG80211 depends on PCI default n diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c index a706605cef9a..462310e6e88f 100644 --- a/drivers/net/wireless/b43/phy_g.c +++ b/drivers/net/wireless/b43/phy_g.c @@ -2295,7 +2295,7 @@ static u8 b43_gphy_aci_scan(struct b43_wldev *dev) b43_phy_mask(dev, B43_PHY_G_CRS, 0x7FFF); b43_set_all_gains(dev, 3, 8, 1); - start = (channel > 5) ? channel - 5 : 1; + start = (channel - 5 > 0) ? channel - 5 : 1; end = (channel + 5 < 14) ? channel + 5 : 13; for (i = start; i <= end; i++) { diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index d1afa74aa144..a5557d70689f 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@ -5320,7 +5320,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev) for (i = 0; i < 4; i++) { if (dev->phy.rev >= 3) - coef[i] = table[i]; + table[i] = coef[i]; else coef[i] = 0; } diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c index 5b1e8890305c..9501420340a9 100644 --- a/drivers/net/wireless/b43legacy/radio.c +++ b/drivers/net/wireless/b43legacy/radio.c @@ -299,7 +299,7 @@ u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev) & 0x7FFF); b43legacy_set_all_gains(dev, 3, 8, 1); - start = (channel > 5) ? channel - 5 : 1; + start = (channel - 5 > 0) ? channel - 5 : 1; end = (channel + 5 < 14) ? 
channel + 5 : 13; for (i = start; i <= end; i++) { diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 8b56aa627487..b820e80d4b4c 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -1221,7 +1221,6 @@ static int brcms_bcma_probe(struct bcma_device *pdev) { struct brcms_info *wl; struct ieee80211_hw *hw; - int ret; dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n", pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class, @@ -1246,16 +1245,11 @@ static int brcms_bcma_probe(struct bcma_device *pdev) wl = brcms_attach(pdev); if (!wl) { pr_err("%s: brcms_attach failed!\n", __func__); - ret = -ENODEV; - goto err_free_ieee80211; + return -ENODEV; } brcms_led_register(wl); return 0; - -err_free_ieee80211: - ieee80211_free_hw(hw); - return ret; } static int brcms_suspend(struct bcma_device *pdev) diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/cw1200/cw1200_sdio.c index de92107549ee..d3acc85932a5 100644 --- a/drivers/net/wireless/cw1200/cw1200_sdio.c +++ b/drivers/net/wireless/cw1200/cw1200_sdio.c @@ -62,7 +62,6 @@ static const struct sdio_device_id cw1200_sdio_ids[] = { { SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) }, { /* end: all zeroes */ }, }; -MODULE_DEVICE_TABLE(sdio, cw1200_sdio_ids); /* hwbus_ops implemetation */ diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c index f4338bce78f4..84624c812a15 100644 --- a/drivers/net/wireless/cw1200/main.c +++ b/drivers/net/wireless/cw1200/main.c @@ -385,7 +385,6 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, CW1200_LINK_ID_MAX, cw1200_skb_dtor, priv)) { - destroy_workqueue(priv->workqueue); ieee80211_free_hw(hw); return NULL; } @@ -397,7 +396,6 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, for (; i > 0; i--) 
cw1200_queue_deinit(&priv->tx_queue[i - 1]); cw1200_queue_stats_deinit(&priv->tx_queue_stats); - destroy_workqueue(priv->workqueue); ieee80211_free_hw(hw); return NULL; } diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c index 028b37ba9425..dd29f46d086b 100644 --- a/drivers/net/wireless/ipw2x00/libipw_wx.c +++ b/drivers/net/wireless/ipw2x00/libipw_wx.c @@ -649,10 +649,8 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee, } if (ext->alg != IW_ENCODE_ALG_NONE) { - int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN); - - memcpy(sec.keys[idx], ext->key, key_len); - sec.key_sizes[idx] = key_len; + memcpy(sec.keys[idx], ext->key, ext->key_len); + sec.key_sizes[idx] = ext->key_len; sec.flags |= (1 << idx); if (ext->alg == IW_ENCODE_ALG_WEP) { sec.encode_alg[idx] = SEC_ALG_WEP; diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 8945e32f5293..8ba63749b02e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -1957,7 +1957,6 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) struct iwl_mvm_mc_iter_data iter_data = { .mvm = mvm, }; - int ret; lockdep_assert_held(&mvm->mutex); @@ -1967,22 +1966,6 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mc_iface_iterator, &iter_data); - - /* - * Send a (synchronous) ech command so that we wait for the - * multiple asynchronous MCAST_FILTER_CMD commands sent by - * the interface iterator. Otherwise, we might get here over - * and over again (by userspace just sending a lot of these) - * and the CPU can send them faster than the firmware can - * process them. 
- * Note that the CPU is still faster - but with this we'll - * actually send fewer commands overall because the CPU will - * not schedule the work in mac80211 as frequently if it's - * still running when rescheduled (possibly multiple times). - */ - ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); - if (ret) - IWL_ERR(mvm, "Failed to synchronize multicast groups update\n"); } static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index bb81261de45f..13c97f665ba8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -909,7 +909,6 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk) reprobe = container_of(wk, struct iwl_mvm_reprobe, work); if (device_reprobe(reprobe->dev)) dev_err(reprobe->dev, "reprobe failed!\n"); - put_device(reprobe->dev); kfree(reprobe); module_put(THIS_MODULE); } @@ -992,7 +991,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) module_put(THIS_MODULE); return; } - reprobe->dev = get_device(mvm->trans->dev); + reprobe->dev = mvm->trans->dev; INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); schedule_work(&reprobe->work); } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) { diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index 70954b4ef8a2..c19780124ffd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -923,9 +923,6 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); - if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) - return false; - if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) return false; diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index b020bd6adaf2..8dfe6b2bc703 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -583,14 
+583,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = &trans_pcie->txq[txq_id]; - struct iwl_queue *q; - - if (!txq) { - IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); - return; - } - - q = &txq->q; + struct iwl_queue *q = &txq->q; spin_lock_bh(&txq->lock); while (q->write_ptr != q->read_ptr) { @@ -1341,7 +1334,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, u32 cmd_pos; const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; - unsigned long flags2; if (WARN(!trans_pcie->wide_cmd_header && group_id > IWL_ALWAYS_LONG_GROUP, @@ -1424,10 +1416,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, goto free_dup_buf; } - spin_lock_irqsave(&txq->lock, flags2); + spin_lock_bh(&txq->lock); if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { - spin_unlock_irqrestore(&txq->lock, flags2); + spin_unlock_bh(&txq->lock); IWL_ERR(trans, "No space in command queue\n"); iwl_op_mode_cmd_queue_full(trans->op_mode); @@ -1589,7 +1581,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); out: - spin_unlock_irqrestore(&txq->lock, flags2); + spin_unlock_bh(&txq->lock); free_dup_buf: if (idx < 0) kfree(dup_buf); diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index 45d68ee682f6..33ceda296c9c 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -1229,10 +1229,6 @@ static int if_sdio_probe(struct sdio_func *func, spin_lock_init(&card->lock); card->workqueue = create_workqueue("libertas_sdio"); - if (unlikely(!card->workqueue)) { - ret = -ENOMEM; - goto err_queue; - } INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker); init_waitqueue_head(&card->pwron_waitq); @@ -1286,7 +1282,6 @@ err_activate_card: lbs_remove_card(priv); free: 
destroy_workqueue(card->workqueue); -err_queue: while (card->packets) { packet = card->packets; card->packets = card->packets->next; diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c index 1793611a380c..d271eaf1f949 100644 --- a/drivers/net/wireless/libertas/if_usb.c +++ b/drivers/net/wireless/libertas/if_usb.c @@ -291,7 +291,6 @@ err_add_card: if_usb_reset_device(cardp); dealloc: if_usb_free(cardp); - kfree(cardp); error: return r; @@ -318,7 +317,6 @@ static void if_usb_disconnect(struct usb_interface *intf) /* Unlink and free urb */ if_usb_free(cardp); - kfree(cardp); usb_set_intfdata(intf, NULL); usb_put_dev(interface_to_usbdev(intf)); diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c index f1e9cbcfdc16..d0c881dd5846 100644 --- a/drivers/net/wireless/libertas/mesh.c +++ b/drivers/net/wireless/libertas/mesh.c @@ -797,6 +797,19 @@ static const struct attribute_group mesh_ie_group = { .attrs = mesh_ie_attrs, }; +static void lbs_persist_config_init(struct net_device *dev) +{ + int ret; + ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group); + ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group); +} + +static void lbs_persist_config_remove(struct net_device *dev) +{ + sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group); + sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group); +} + /*************************************************************************** * Initializing and starting, stopping mesh @@ -1008,10 +1021,6 @@ static int lbs_add_mesh(struct lbs_private *priv) SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent); mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST; - mesh_dev->sysfs_groups[0] = &lbs_mesh_attr_group; - mesh_dev->sysfs_groups[1] = &boot_opts_group; - mesh_dev->sysfs_groups[2] = &mesh_ie_group; - /* Register virtual mesh interface */ ret = register_netdev(mesh_dev); if (ret) { @@ -1019,10 +1028,19 @@ static int lbs_add_mesh(struct lbs_private *priv) 
goto err_free_netdev; } + ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group); + if (ret) + goto err_unregister; + + lbs_persist_config_init(mesh_dev); + /* Everything successful */ ret = 0; goto done; +err_unregister: + unregister_netdev(mesh_dev); + err_free_netdev: free_netdev(mesh_dev); @@ -1045,6 +1063,8 @@ void lbs_remove_mesh(struct lbs_private *priv) lbs_deb_enter(LBS_DEB_MESH); netif_stop_queue(mesh_dev); netif_carrier_off(mesh_dev); + sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group); + lbs_persist_config_remove(mesh_dev); unregister_netdev(mesh_dev); priv->mesh_dev = NULL; kfree(mesh_dev->ieee80211_ptr); diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c index 193f8f70c4ed..799a2efe5793 100644 --- a/drivers/net/wireless/libertas_tf/if_usb.c +++ b/drivers/net/wireless/libertas_tf/if_usb.c @@ -240,7 +240,6 @@ static int if_usb_probe(struct usb_interface *intf, dealloc: if_usb_free(cardp); - kfree(cardp); error: lbtf_deb_leave(LBTF_DEB_MAIN); return -ENOMEM; @@ -265,7 +264,6 @@ static void if_usb_disconnect(struct usb_interface *intf) /* Unlink and free urb */ if_usb_free(cardp); - kfree(cardp); usb_set_intfdata(intf, NULL); usb_put_dev(interface_to_usbdev(intf)); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index b76aaf4163c8..2db8d5c4758d 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1899,15 +1899,6 @@ static void hw_scan_work(struct work_struct *work) memcpy(skb_put(probe, req->ie_len), req->ie, req->ie_len); - if (!ieee80211_tx_prepare_skb(hwsim->hw, - hwsim->hw_scan_vif, - probe, - hwsim->tmp_chan->band, - NULL)) { - kfree_skb(probe); - continue; - } - local_bh_disable(); mac80211_hwsim_tx_frame(hwsim->hw, probe, hwsim->tmp_chan); @@ -2788,10 +2779,6 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, } txi->flags |= IEEE80211_TX_STAT_ACK; } - - if 
(hwsim_flags & HWSIM_TX_CTL_NO_ACK) - txi->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; - ieee80211_tx_status_irqsafe(data2->hw, skb); return 0; out: diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c index 970aaf2ed3a7..3d0b9324d5bf 100644 --- a/drivers/net/wireless/mediatek/mt7601u/dma.c +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c @@ -160,7 +160,8 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e) if (new_p) { /* we have one extra ref from the allocator */ - put_page(e->p); + __free_pages(e->p, MT_RX_ORDER); + e->p = new_p; } } @@ -317,6 +318,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, } e = &q->e[q->end]; + e->skb = skb; usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len, mt7601u_complete_tx, q); ret = usb_submit_urb(e->urb, GFP_ATOMIC); @@ -334,7 +336,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, q->end = (q->end + 1) % q->entries; q->used++; - e->skb = skb; if (q->used >= q->entries) ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c index 7b917c4613ad..8d8ee0344f7b 100644 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c @@ -106,7 +106,7 @@ mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom) { u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1); - return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN); + return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN); } static void diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c index b70eac7d2dd7..c174e79e6df2 100644 --- a/drivers/net/wireless/mwifiex/11n.c +++ b/drivers/net/wireless/mwifiex/11n.c @@ -630,15 +630,14 @@ int mwifiex_send_delba(struct mwifiex_private *priv, int tid, u8 *peer_mac, uint16_t del_ba_param_set; memset(&delba, 0, 
sizeof(delba)); + delba.del_ba_param_set = cpu_to_le16(tid << DELBA_TID_POS); - del_ba_param_set = tid << DELBA_TID_POS; - + del_ba_param_set = le16_to_cpu(delba.del_ba_param_set); if (initiator) del_ba_param_set |= IEEE80211_DELBA_PARAM_INITIATOR_MASK; else del_ba_param_set &= ~IEEE80211_DELBA_PARAM_INITIATOR_MASK; - delba.del_ba_param_set = cpu_to_le16(del_ba_param_set); memcpy(&delba.peer_mac_addr, peer_mac, ETH_ALEN); /* We don't wait for the response of this command */ diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 83b7cd5bdf93..6378dfd3b4e8 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -856,8 +856,6 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN); - if (req_ssid->ssid_len > IEEE80211_MAX_SSID_LEN) - req_ssid->ssid_len = IEEE80211_MAX_SSID_LEN; memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len); mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n", diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 876c335c3069..c410ef92b084 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -1210,14 +1210,6 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, ret = -1; goto done_unmap; } - - /* The firmware (latest version 15.68.19.p21) of the 88W8897 PCIe+USB card - * seems to crash randomly after setting the TX ring write pointer when - * ASPM powersaving is enabled. A workaround seems to be keeping the bus - * busy by reading a random register afterwards. 
- */ - mwifiex_read_reg(adapter, PCI_VENDOR_ID, &rx_val); - if ((mwifiex_pcie_txbd_not_full(card)) && tx_param->next_pkt_len) { /* have more packets and TxBD still can hold more */ diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index 4cdf6450aeed..1a1b1de87583 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -132,8 +132,7 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, default: mwifiex_dbg(adapter, ERROR, "unknown recv_type %#x\n", recv_type); - ret = -1; - goto exit_restore_skb; + return -1; } break; case MWIFIEX_USB_EP_DATA: @@ -474,22 +473,6 @@ static int mwifiex_usb_probe(struct usb_interface *intf, } } - switch (card->usb_boot_state) { - case USB8XXX_FW_DNLD: - /* Reject broken descriptors. */ - if (!card->rx_cmd_ep || !card->tx_cmd_ep) - return -ENODEV; - if (card->bulk_out_maxpktsize == 0) - return -ENODEV; - break; - case USB8XXX_FW_READY: - /* Assume the driver can handle missing endpoints for now. 
*/ - break; - default: - WARN_ON(1); - return -ENODEV; - } - usb_set_intfdata(intf, card); ret = mwifiex_add_card(card, &add_remove_card_sem, &usb_ops, diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index c6f008796ff1..b1b400b59d86 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -1459,7 +1459,6 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL); if (txq->skb == NULL) { pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); - txq->txd = NULL; return -ENOMEM; } @@ -5783,8 +5782,8 @@ static void mwl8k_fw_state_machine(const struct firmware *fw, void *context) fail: priv->fw_state = FW_STATE_ERROR; complete(&priv->firmware_loading_complete); - mwl8k_release_firmware(priv); device_release_driver(&priv->pdev->dev); + mwl8k_release_firmware(priv); } #define MAX_RESTART_ATTEMPTS 1 diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c index 8eb73d54b1d6..3c5baccd6792 100644 --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/orinoco/orinoco_usb.c @@ -1224,6 +1224,13 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) if (skb->len < ETH_HLEN) goto drop; + ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0); + if (!ctx) + goto busy; + + memset(ctx->buf, 0, BULK_BUF_SIZE); + buf = ctx->buf->data; + tx_control = 0; err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control, @@ -1231,13 +1238,6 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) if (err) goto drop; - ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0); - if (!ctx) - goto drop; - - memset(ctx->buf, 0, BULK_BUF_SIZE); - buf = ctx->buf->data; - { __le16 *tx_cntl = (__le16 *)buf; *tx_cntl = cpu_to_le16(tx_control); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index e1992de500b0..7de18ed10db8 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -454,14 +454,9 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) } } -static int _rtl_init_deferred_work(struct ieee80211_hw *hw) +static void _rtl_init_deferred_work(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct workqueue_struct *wq; - - wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name); - if (!wq) - return -ENOMEM; /* <1> timer */ setup_timer(&rtlpriv->works.watchdog_timer, @@ -470,7 +465,11 @@ static int _rtl_init_deferred_work(struct ieee80211_hw *hw) rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw); /* <2> work queue */ rtlpriv->works.hw = hw; - rtlpriv->works.rtl_wq = wq; + rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name); + if (unlikely(!rtlpriv->works.rtl_wq)) { + pr_err("Failed to allocate work queue\n"); + return; + } INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq, (void *)rtl_watchdog_wq_callback); @@ -482,7 +481,7 @@ static int _rtl_init_deferred_work(struct ieee80211_hw *hw) (void *)rtl_swlps_rfon_wq_callback); INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, (void *)rtl_fwevt_wq_callback); - return 0; + } void rtl_deinit_deferred_work(struct ieee80211_hw *hw) @@ -574,7 +573,9 @@ int rtl_init_core(struct ieee80211_hw *hw) rtlmac->link_state = MAC80211_NOLINK; /* <6> init deferred work */ - return _rtl_init_deferred_work(hw); + _rtl_init_deferred_work(hw); + + return 0; } EXPORT_SYMBOL_GPL(rtl_init_core); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 380a2dcb95af..137d7c8645da 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -1062,7 +1062,6 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw) _InitPABias(hw); rtl92c_dm_init(hw); exit: - local_irq_disable(); local_irq_restore(flags); return err; } diff --git 
a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 83cbaac877ea..974387ad1e8c 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -42,7 +42,7 @@ static int rsi_usb_card_write(struct rsi_hw *adapter, buf, len, &transfer, - USB_CTRL_SET_TIMEOUT); + HZ * 5); if (status < 0) { rsi_dbg(ERR_ZONE, diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c index 247f4310a38f..ede31f048ef9 100644 --- a/drivers/net/wireless/ti/wl1251/cmd.c +++ b/drivers/net/wireless/ti/wl1251/cmd.c @@ -465,12 +465,9 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len, cmd->channels[i].channel = channels[i]->hw_value; } - if (ssid) { - int len = clamp_val(ssid_len, 0, IEEE80211_MAX_SSID_LEN); - - cmd->params.ssid_len = len; - memcpy(cmd->params.ssid, ssid, len); - } + cmd->params.ssid_len = ssid_len; + if (ssid) + memcpy(cmd->params.ssid, ssid, ssid_len); ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd)); if (ret < 0) { diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c index fb42ad9359f2..ae12da7ec6e6 100644 --- a/drivers/net/wireless/ti/wl12xx/main.c +++ b/drivers/net/wireless/ti/wl12xx/main.c @@ -647,6 +647,7 @@ static int wl12xx_identify_chip(struct wl1271 *wl) wl->quirks |= WLCORE_QUIRK_LEGACY_NVS | WLCORE_QUIRK_DUAL_PROBE_TMPL | WLCORE_QUIRK_TKIP_HEADER_SPACE | + WLCORE_QUIRK_START_STA_FAILS | WLCORE_QUIRK_AP_ZERO_SESSION_ID; wl->sr_fw_name = WL127X_FW_NAME_SINGLE; wl->mr_fw_name = WL127X_FW_NAME_MULTI; @@ -670,6 +671,7 @@ static int wl12xx_identify_chip(struct wl1271 *wl) wl->quirks |= WLCORE_QUIRK_LEGACY_NVS | WLCORE_QUIRK_DUAL_PROBE_TMPL | WLCORE_QUIRK_TKIP_HEADER_SPACE | + WLCORE_QUIRK_START_STA_FAILS | WLCORE_QUIRK_AP_ZERO_SESSION_ID; wl->plt_fw_name = WL127X_PLT_FW_NAME; wl->sr_fw_name = WL127X_FW_NAME_SINGLE; @@ -698,6 +700,7 @@ static int wl12xx_identify_chip(struct wl1271 *wl) wl->quirks |= 
WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN | WLCORE_QUIRK_DUAL_PROBE_TMPL | WLCORE_QUIRK_TKIP_HEADER_SPACE | + WLCORE_QUIRK_START_STA_FAILS | WLCORE_QUIRK_AP_ZERO_SESSION_ID; wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER, @@ -1515,13 +1518,6 @@ static int wl12xx_get_fuse_mac(struct wl1271 *wl) u32 mac1, mac2; int ret; - /* Device may be in ELP from the bootloader or kexec */ - ret = wlcore_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL); - if (ret < 0) - goto out; - - usleep_range(500000, 700000); - ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]); if (ret < 0) goto out; diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 84fa7bc8f338..d6d240528a60 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -2889,8 +2889,21 @@ static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif) if (is_ibss) ret = wl12xx_cmd_role_start_ibss(wl, wlvif); - else + else { + if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) { + /* + * TODO: this is an ugly workaround for wl12xx fw + * bug - we are not able to tx/rx after the first + * start_sta, so make dummy start+stop calls, + * and then call start_sta again. + * this should be fixed in the fw. 
+ */ + wl12xx_cmd_role_start_sta(wl, wlvif); + wl12xx_cmd_role_stop_sta(wl, wlvif); + } + ret = wl12xx_cmd_role_start_sta(wl, wlvif); + } return ret; } diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index 4cd94c676fa6..053d29764dbd 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -556,6 +556,9 @@ wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip, /* Each RX/TX transaction requires an end-of-transaction transfer */ #define WLCORE_QUIRK_END_OF_TRANSACTION BIT(0) +/* the first start_role(sta) sometimes doesn't work on wl12xx */ +#define WLCORE_QUIRK_START_STA_FAILS BIT(1) + /* wl127x and SPI don't support SDIO block size alignment */ #define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN BIT(2) diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h index ca2021bcac14..3fbfd19818f1 100644 --- a/drivers/net/wireless/wl3501.h +++ b/drivers/net/wireless/wl3501.h @@ -378,19 +378,6 @@ struct wl3501_get_confirm { u8 mib_value[100]; }; -struct wl3501_req { - u16 beacon_period; - u16 dtim_period; - u16 cap_info; - u8 bss_type; - u8 bssid[ETH_ALEN]; - struct iw_mgmt_essid_pset ssid; - struct iw_mgmt_ds_pset ds_pset; - struct iw_mgmt_cf_pset cf_pset; - struct iw_mgmt_ibss_pset ibss_pset; - struct iw_mgmt_data_rset bss_basic_rset; -}; - struct wl3501_join_req { u16 next_blk; u8 sig_id; @@ -401,7 +388,16 @@ struct wl3501_join_req { u16 probe_delay; u8 timestamp[8]; u8 local_time[8]; - struct wl3501_req req; + u16 beacon_period; + u16 dtim_period; + u16 cap_info; + u8 bss_type; + u8 bssid[ETH_ALEN]; + struct iw_mgmt_essid_pset ssid; + struct iw_mgmt_ds_pset ds_pset; + struct iw_mgmt_cf_pset cf_pset; + struct iw_mgmt_ibss_pset ibss_pset; + struct iw_mgmt_data_rset bss_basic_rset; }; struct wl3501_join_confirm { @@ -446,7 +442,16 @@ struct wl3501_scan_confirm { u16 status; char timestamp[8]; char localtime[8]; - struct wl3501_req req; + u16 beacon_period; + u16 
dtim_period; + u16 cap_info; + u8 bss_type; + u8 bssid[ETH_ALEN]; + struct iw_mgmt_essid_pset ssid; + struct iw_mgmt_ds_pset ds_pset; + struct iw_mgmt_cf_pset cf_pset; + struct iw_mgmt_ibss_pset ibss_pset; + struct iw_mgmt_data_rset bss_basic_rset; u8 rssi; }; @@ -465,10 +470,8 @@ struct wl3501_md_req { u16 size; u8 pri; u8 service_class; - struct { - u8 daddr[ETH_ALEN]; - u8 saddr[ETH_ALEN]; - } addr; + u8 daddr[ETH_ALEN]; + u8 saddr[ETH_ALEN]; }; struct wl3501_md_ind { @@ -480,10 +483,8 @@ struct wl3501_md_ind { u8 reception; u8 pri; u8 service_class; - struct { - u8 daddr[ETH_ALEN]; - u8 saddr[ETH_ALEN]; - } addr; + u8 daddr[ETH_ALEN]; + u8 saddr[ETH_ALEN]; }; struct wl3501_md_confirm { diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index f2bb552448c6..99de07d14939 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -457,7 +457,6 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len) struct wl3501_md_req sig = { .sig_id = WL3501_SIG_MD_REQ, }; - size_t sig_addr_len = sizeof(sig.addr); u8 *pdata = (char *)data; int rc = -EIO; @@ -473,9 +472,9 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len) goto out; } rc = 0; - memcpy(&sig.addr, pdata, sig_addr_len); - pktlen = len - sig_addr_len; - pdata += sig_addr_len; + memcpy(&sig.daddr[0], pdata, 12); + pktlen = len - 12; + pdata += 12; sig.data = bf; if (((*pdata) * 256 + (*(pdata + 1))) > 1500) { u8 addr4[ETH_ALEN] = { @@ -578,7 +577,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas) struct wl3501_join_req sig = { .sig_id = WL3501_SIG_JOIN_REQ, .timeout = 10, - .req.ds_pset = { + .ds_pset = { .el = { .id = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET, .len = 1, @@ -587,7 +586,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas) }, }; - memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req)); + memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72); return 
wl3501_esbq_exec(this, &sig, sizeof(sig)); } @@ -655,37 +654,35 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr) if (sig.status == WL3501_STATUS_SUCCESS) { pr_debug("success"); if ((this->net_type == IW_MODE_INFRA && - (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) || + (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) || (this->net_type == IW_MODE_ADHOC && - (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) || + (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) || this->net_type == IW_MODE_AUTO) { if (!this->essid.el.len) matchflag = 1; else if (this->essid.el.len == 3 && !memcmp(this->essid.essid, "ANY", 3)) matchflag = 1; - else if (this->essid.el.len != sig.req.ssid.el.len) + else if (this->essid.el.len != sig.ssid.el.len) matchflag = 0; - else if (memcmp(this->essid.essid, sig.req.ssid.essid, + else if (memcmp(this->essid.essid, sig.ssid.essid, this->essid.el.len)) matchflag = 0; else matchflag = 1; if (matchflag) { for (i = 0; i < this->bss_cnt; i++) { - if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid, - sig.req.bssid)) { + if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) { matchflag = 0; break; } } } if (matchflag && (i < 20)) { - memcpy(&this->bss_set[i].req, - &sig.req, sizeof(sig.req)); + memcpy(&this->bss_set[i].beacon_period, + &sig.beacon_period, 73); this->bss_cnt++; this->rssi = sig.rssi; - this->bss_set[i].rssi = sig.rssi; } } } else if (sig.status == WL3501_STATUS_TIMEOUT) { @@ -877,19 +874,19 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr) if (this->join_sta_bss < this->bss_cnt) { const int i = this->join_sta_bss; memcpy(this->bssid, - this->bss_set[i].req.bssid, ETH_ALEN); - this->chan = this->bss_set[i].req.ds_pset.chan; + this->bss_set[i].bssid, ETH_ALEN); + this->chan = this->bss_set[i].ds_pset.chan; iw_copy_mgmt_info_element(&this->keep_essid.el, - &this->bss_set[i].req.ssid.el); + &this->bss_set[i].ssid.el); wl3501_mgmt_auth(this); } } else { const int i = 
this->join_sta_bss; - memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN); - this->chan = this->bss_set[i].req.ds_pset.chan; + memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN); + this->chan = this->bss_set[i].ds_pset.chan; iw_copy_mgmt_info_element(&this->keep_essid.el, - &this->bss_set[i].req.ssid.el); + &this->bss_set[i].ssid.el); wl3501_online(dev); } } else { @@ -971,8 +968,7 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev, } else { skb->dev = dev; skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */ - skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr, - sizeof(sig.addr)); + skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12); wl3501_receive(this, skb->data, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, dev); @@ -1568,30 +1564,30 @@ static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info, for (i = 0; i < this->bss_cnt; ++i) { iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; - memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN); + memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN); current_ev = iwe_stream_add_event(info, current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_ADDR_LEN); iwe.cmd = SIOCGIWESSID; iwe.u.data.flags = 1; - iwe.u.data.length = this->bss_set[i].req.ssid.el.len; + iwe.u.data.length = this->bss_set[i].ssid.el.len; current_ev = iwe_stream_add_point(info, current_ev, extra + IW_SCAN_MAX_DATA, &iwe, - this->bss_set[i].req.ssid.essid); + this->bss_set[i].ssid.essid); iwe.cmd = SIOCGIWMODE; - iwe.u.mode = this->bss_set[i].req.bss_type; + iwe.u.mode = this->bss_set[i].bss_type; current_ev = iwe_stream_add_event(info, current_ev, extra + IW_SCAN_MAX_DATA, &iwe, IW_EV_UINT_LEN); iwe.cmd = SIOCGIWFREQ; - iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan; + iwe.u.freq.m = this->bss_set[i].ds_pset.chan; iwe.u.freq.e = 0; current_ev = iwe_stream_add_event(info, current_ev, extra + IW_SCAN_MAX_DATA, &iwe, 
IW_EV_FREQ_LEN); iwe.cmd = SIOCGIWENCODE; - if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY) + if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY) iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; else iwe.u.data.flags = IW_ENCODE_DISABLED; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 93f7659e7595..66260ea74d7d 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -161,15 +161,13 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id) { struct xenvif_queue *queue = dev_id; int old; - bool has_rx, has_tx; old = xenvif_atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending); WARN(old, "Interrupt while EOI pending\n"); - has_tx = xenvif_handle_tx_interrupt(queue); - has_rx = xenvif_handle_rx_interrupt(queue); - - if (!has_rx && !has_tx) { + /* Use bitwise or as we need to call both functions. */ + if ((!xenvif_handle_tx_interrupt(queue) | + !xenvif_handle_rx_interrupt(queue))) { atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending); xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); } @@ -664,7 +662,6 @@ err_tx_unbind: queue->tx_irq = 0; err_unmap: xenvif_unmap_frontend_rings(queue); - netif_napi_del(&queue->napi); err: return err; } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 044478c9adad..ee7a800c16d5 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -189,15 +189,11 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) spin_lock_irqsave(&queue->rx_queue.lock, flags); - if (queue->rx_queue_len >= queue->rx_queue_max) { - netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); - kfree_skb(skb); - queue->vif->dev->stats.rx_dropped++; - } else { - __skb_queue_tail(&queue->rx_queue, skb); + __skb_queue_tail(&queue->rx_queue, skb); - queue->rx_queue_len += skb->len; - } + queue->rx_queue_len += skb->len; + if (queue->rx_queue_len > 
queue->rx_queue_max) + netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); spin_unlock_irqrestore(&queue->rx_queue.lock, flags); } @@ -247,7 +243,6 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) break; xenvif_rx_dequeue(queue); kfree_skb(skb); - queue->vif->dev->stats.rx_dropped++; } } @@ -999,7 +994,7 @@ check_frags: * the header's copy failed, and they are * sharing a slot, send an error */ - if (i == 0 && !first_shinfo && sharedslot) + if (i == 0 && sharedslot) xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); else @@ -1802,15 +1797,7 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget) NULL, queue->pages_to_map, nr_mops); - if (ret) { - unsigned int i; - - netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n", - nr_mops, ret); - for (i = 0; i < nr_mops; ++i) - WARN_ON_ONCE(queue->tx_map_ops[i].status == - GNTST_okay); - } + BUG_ON(ret); } work_done = xenvif_tx_submit(queue); diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 683fd8560f2b..56ebd8267386 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -697,14 +697,12 @@ static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif) return -ENOMEM; snprintf(node, maxlen, "%s/rate", dev->nodename); vif->credit_watch.node = node; - vif->credit_watch.will_handle = NULL; vif->credit_watch.callback = xen_net_rate_changed; err = register_xenbus_watch(&vif->credit_watch); if (err) { pr_err("Failed to set watcher %s\n", vif->credit_watch.node); kfree(node); vif->credit_watch.node = NULL; - vif->credit_watch.will_handle = NULL; vif->credit_watch.callback = NULL; } return err; @@ -849,15 +847,11 @@ static void connect(struct backend_info *be) xenvif_carrier_on(be->vif); unregister_hotplug_status_watch(be); - if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) { - err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, - NULL, hotplug_status_changed, - 
"%s/%s", dev->nodename, - "hotplug-status"); - if (err) - goto err; + err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, + hotplug_status_changed, + "%s/%s", dev->nodename, "hotplug-status"); + if (!err) be->have_hotplug_status_watch = 1; - } netif_tx_wake_all_queues(be->vif->dev); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 637d5e894012..7d4c0c46a889 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -120,17 +120,21 @@ struct netfront_queue { /* * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries - * are linked from tx_skb_freelist through tx_link. + * are linked from tx_skb_freelist through skb_entry.link. + * + * NB. Freelist index entries are always going to be less than + * PAGE_OFFSET, whereas pointers to skbs will always be equal or + * greater than PAGE_OFFSET: we use this property to distinguish + * them. */ - struct sk_buff *tx_skbs[NET_TX_RING_SIZE]; - unsigned short tx_link[NET_TX_RING_SIZE]; -#define TX_LINK_NONE 0xffff -#define TX_PENDING 0xfffe + union skb_entry { + struct sk_buff *skb; + unsigned long link; + } tx_skbs[NET_TX_RING_SIZE]; grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; struct page *grant_tx_page[NET_TX_RING_SIZE]; unsigned tx_skb_freelist; - unsigned int tx_pend_queue; spinlock_t rx_lock ____cacheline_aligned_in_smp; struct xen_netif_rx_front_ring rx; @@ -141,9 +145,6 @@ struct netfront_queue { struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; - - unsigned int rx_rsp_unconsumed; - spinlock_t rx_cons_lock; }; struct netfront_info { @@ -159,9 +160,6 @@ struct netfront_info { struct netfront_stats __percpu *rx_stats; struct netfront_stats __percpu *tx_stats; - /* Is device behaving sane? 
*/ - bool broken; - atomic_t rx_gso_checksum_fixup; }; @@ -170,25 +168,33 @@ struct netfront_rx_info { struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; +static void skb_entry_set_link(union skb_entry *list, unsigned short id) +{ + list->link = id; +} + +static int skb_entry_is_link(const union skb_entry *list) +{ + BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link)); + return (unsigned long)list->skb < PAGE_OFFSET; +} + /* * Access macros for acquiring freeing slots in tx_skbs[]. */ -static void add_id_to_list(unsigned *head, unsigned short *list, - unsigned short id) +static void add_id_to_freelist(unsigned *head, union skb_entry *list, + unsigned short id) { - list[id] = *head; + skb_entry_set_link(&list[id], *head); *head = id; } -static unsigned short get_id_from_list(unsigned *head, unsigned short *list) +static unsigned short get_id_from_freelist(unsigned *head, + union skb_entry *list) { unsigned int id = *head; - - if (id != TX_LINK_NONE) { - *head = list[id]; - list[id] = TX_LINK_NONE; - } + *head = list[id].link; return id; } @@ -346,7 +352,7 @@ static int xennet_open(struct net_device *dev) unsigned int i = 0; struct netfront_queue *queue = NULL; - if (!np->queues || np->broken) + if (!np->queues) return -ENODEV; for (i = 0; i < num_queues; ++i) { @@ -368,55 +374,32 @@ static int xennet_open(struct net_device *dev) return 0; } -static bool xennet_tx_buf_gc(struct netfront_queue *queue) +static void xennet_tx_buf_gc(struct netfront_queue *queue) { RING_IDX cons, prod; unsigned short id; struct sk_buff *skb; - bool work_done = false; - const struct device *dev = &queue->info->netdev->dev; BUG_ON(!netif_carrier_ok(queue->info->netdev)); do { prod = queue->tx.sring->rsp_prod; - if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) { - dev_alert(dev, "Illegal number of responses %u\n", - prod - queue->tx.rsp_cons); - goto err; - } rmb(); /* Ensure we see responses up to 'rp'. 
*/ for (cons = queue->tx.rsp_cons; cons != prod; cons++) { - struct xen_netif_tx_response txrsp; + struct xen_netif_tx_response *txrsp; - work_done = true; - - RING_COPY_RESPONSE(&queue->tx, cons, &txrsp); - if (txrsp.status == XEN_NETIF_RSP_NULL) + txrsp = RING_GET_RESPONSE(&queue->tx, cons); + if (txrsp->status == XEN_NETIF_RSP_NULL) continue; - id = txrsp.id; - if (id >= RING_SIZE(&queue->tx)) { - dev_alert(dev, - "Response has incorrect id (%u)\n", - id); - goto err; - } - if (queue->tx_link[id] != TX_PENDING) { - dev_alert(dev, - "Response for inactive request\n"); - goto err; - } - - queue->tx_link[id] = TX_LINK_NONE; - skb = queue->tx_skbs[id]; - queue->tx_skbs[id] = NULL; + id = txrsp->id; + skb = queue->tx_skbs[id].skb; if (unlikely(gnttab_query_foreign_access( queue->grant_tx_ref[id]) != 0)) { - dev_alert(dev, - "Grant still in use by backend domain\n"); - goto err; + pr_alert("%s: warning -- grant still in use by backend domain\n", + __func__); + BUG(); } gnttab_end_foreign_access_ref( queue->grant_tx_ref[id], GNTMAP_readonly); @@ -424,7 +407,7 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue) &queue->gref_tx_head, queue->grant_tx_ref[id]); queue->grant_tx_ref[id] = GRANT_INVALID_REF; queue->grant_tx_page[id] = NULL; - add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id); + add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id); dev_kfree_skb_irq(skb); } @@ -444,22 +427,13 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue) } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod)); xennet_maybe_wake_tx(queue); - - return work_done; - - err: - queue->info->broken = true; - dev_alert(dev, "Disabled for further use\n"); - - return work_done; } struct xennet_gnttab_make_txreq { struct netfront_queue *queue; struct sk_buff *skb; struct page *page; - struct xen_netif_tx_request *tx; /* Last request on ring page */ - struct xen_netif_tx_request tx_local; /* Last request local copy*/ + struct xen_netif_tx_request *tx; 
/* Last request */ unsigned int size; }; @@ -475,7 +449,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, struct netfront_queue *queue = info->queue; struct sk_buff *skb = info->skb; - id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link); + id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); ref = gnttab_claim_grant_reference(&queue->gref_tx_head); WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); @@ -483,37 +457,34 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, gfn, GNTMAP_readonly); - queue->tx_skbs[id] = skb; + queue->tx_skbs[id].skb = skb; queue->grant_tx_page[id] = page; queue->grant_tx_ref[id] = ref; - info->tx_local.id = id; - info->tx_local.gref = ref; - info->tx_local.offset = offset; - info->tx_local.size = len; - info->tx_local.flags = 0; - - *tx = info->tx_local; - - /* - * Put the request in the pending queue, it will be set to be pending - * when the producer index is about to be raised. 
- */ - add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id); + tx->id = id; + tx->gref = ref; + tx->offset = offset; + tx->size = len; + tx->flags = 0; info->tx = tx; - info->size += info->tx_local.size; + info->size += tx->size; } static struct xen_netif_tx_request *xennet_make_first_txreq( - struct xennet_gnttab_make_txreq *info, - unsigned int offset, unsigned int len) + struct netfront_queue *queue, struct sk_buff *skb, + struct page *page, unsigned int offset, unsigned int len) { - info->size = 0; + struct xennet_gnttab_make_txreq info = { + .queue = queue, + .skb = skb, + .page = page, + .size = 0, + }; - gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info); + gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info); - return info->tx; + return info.tx; } static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset, @@ -526,27 +497,35 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset, xennet_tx_setup_grant(gfn, offset, len, data); } -static void xennet_make_txreqs( - struct xennet_gnttab_make_txreq *info, - struct page *page, +static struct xen_netif_tx_request *xennet_make_txreqs( + struct netfront_queue *queue, struct xen_netif_tx_request *tx, + struct sk_buff *skb, struct page *page, unsigned int offset, unsigned int len) { + struct xennet_gnttab_make_txreq info = { + .queue = queue, + .skb = skb, + .tx = tx, + }; + /* Skip unused frames from start of page */ page += offset >> PAGE_SHIFT; offset &= ~PAGE_MASK; while (len) { - info->page = page; - info->size = 0; + info.page = page; + info.size = 0; gnttab_foreach_grant_in_range(page, offset, len, xennet_make_one_txreq, - info); + &info); page++; offset = 0; - len -= info->size; + len -= info.size; } + + return info.tx; } /* @@ -593,22 +572,13 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb, return queue_idx; } -static void xennet_mark_tx_pending(struct netfront_queue *queue) -{ - unsigned int i; - - 
while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) != - TX_LINK_NONE) - queue->tx_link[i] = TX_PENDING; -} - #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); - struct xen_netif_tx_request *first_tx; + struct xen_netif_tx_request *tx, *first_tx; unsigned int i; int notify; int slots; @@ -617,15 +587,12 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int len; unsigned long flags; struct netfront_queue *queue = NULL; - struct xennet_gnttab_make_txreq info = { }; unsigned int num_queues = dev->real_num_tx_queues; u16 queue_index; /* Drop the packet if no queues are set up */ if (num_queues < 1) goto drop; - if (unlikely(np->broken)) - goto drop; /* Determine which queue to transmit this SKB on */ queue_index = skb_get_queue_mapping(skb); queue = &np->queues[queue_index]; @@ -662,24 +629,21 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) } /* First request for the linear area. */ - info.queue = queue; - info.skb = skb; - info.page = page; - first_tx = xennet_make_first_txreq(&info, offset, len); - offset += info.tx_local.size; + first_tx = tx = xennet_make_first_txreq(queue, skb, + page, offset, len); + offset += tx->size; if (offset == PAGE_SIZE) { page++; offset = 0; } - len -= info.tx_local.size; + len -= tx->size; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ - first_tx->flags |= XEN_NETTXF_csum_blank | - XEN_NETTXF_data_validated; + tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) /* remote but checksummed. */ - first_tx->flags |= XEN_NETTXF_data_validated; + tx->flags |= XEN_NETTXF_data_validated; /* Optional extra info after the first request. 
*/ if (skb_shinfo(skb)->gso_size) { @@ -688,7 +652,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) gso = (struct xen_netif_extra_info *) RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); - first_tx->flags |= XEN_NETTXF_extra_info; + tx->flags |= XEN_NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ? @@ -702,21 +666,19 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) } /* Requests for the rest of the linear area. */ - xennet_make_txreqs(&info, page, offset, len); + tx = xennet_make_txreqs(queue, tx, skb, page, offset, len); /* Requests for all the frags. */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - xennet_make_txreqs(&info, skb_frag_page(frag), - frag->page_offset, + tx = xennet_make_txreqs(queue, tx, skb, + skb_frag_page(frag), frag->page_offset, skb_frag_size(frag)); } /* First request has the packet length. 
*/ first_tx->size = skb->len; - xennet_mark_tx_pending(queue); - RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); if (notify) notify_remote_via_irq(queue->tx_irq); @@ -756,16 +718,6 @@ static int xennet_close(struct net_device *dev) return 0; } -static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) -{ - unsigned long flags; - - spin_lock_irqsave(&queue->rx_cons_lock, flags); - queue->rx.rsp_cons = val; - queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); - spin_unlock_irqrestore(&queue->rx_cons_lock, flags); -} - static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, grant_ref_t ref) { @@ -784,7 +736,7 @@ static int xennet_get_extras(struct netfront_queue *queue, RING_IDX rp) { - struct xen_netif_extra_info extra; + struct xen_netif_extra_info *extra; struct device *dev = &queue->info->netdev->dev; RING_IDX cons = queue->rx.rsp_cons; int err = 0; @@ -800,24 +752,26 @@ static int xennet_get_extras(struct netfront_queue *queue, break; } - RING_COPY_RESPONSE(&queue->rx, ++cons, &extra); + extra = (struct xen_netif_extra_info *) + RING_GET_RESPONSE(&queue->rx, ++cons); - if (unlikely(!extra.type || - extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { + if (unlikely(!extra->type || + extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { if (net_ratelimit()) dev_warn(dev, "Invalid extra type: %d\n", - extra.type); + extra->type); err = -EINVAL; } else { - extras[extra.type - 1] = extra; + memcpy(&extras[extra->type - 1], extra, + sizeof(*extra)); } skb = xennet_get_rx_skb(queue, cons); ref = xennet_get_rx_ref(queue, cons); xennet_move_rx_slot(queue, skb, ref); - } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); + } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); - xennet_set_rx_rsp_cons(queue, cons); + queue->rx.rsp_cons = cons; return err; } @@ -825,7 +779,7 @@ static int xennet_get_responses(struct netfront_queue *queue, struct netfront_rx_info *rinfo, RING_IDX rp, struct sk_buff_head *list) { - struct 
xen_netif_rx_response *rx = &rinfo->rx, rx_local; + struct xen_netif_rx_response *rx = &rinfo->rx; struct xen_netif_extra_info *extras = rinfo->extras; struct device *dev = &queue->info->netdev->dev; RING_IDX cons = queue->rx.rsp_cons; @@ -883,8 +837,7 @@ next: break; } - RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local); - rx = &rx_local; + rx = RING_GET_RESPONSE(&queue->rx, cons + slots); skb = xennet_get_rx_skb(queue, cons + slots); ref = xennet_get_rx_ref(queue, cons + slots); slots++; @@ -897,7 +850,7 @@ next: } if (unlikely(err)) - xennet_set_rx_rsp_cons(queue, cons + slots); + queue->rx.rsp_cons = cons + slots; return err; } @@ -939,11 +892,10 @@ static int xennet_fill_frags(struct netfront_queue *queue, struct sk_buff *nskb; while ((nskb = __skb_dequeue(list))) { - struct xen_netif_rx_response rx; + struct xen_netif_rx_response *rx = + RING_GET_RESPONSE(&queue->rx, ++cons); skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; - RING_COPY_RESPONSE(&queue->rx, ++cons, &rx); - if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) { unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; @@ -951,21 +903,20 @@ static int xennet_fill_frags(struct netfront_queue *queue, __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); } if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { - xennet_set_rx_rsp_cons(queue, - ++cons + skb_queue_len(list)); + queue->rx.rsp_cons = ++cons + skb_queue_len(list); kfree_skb(nskb); return -ENOENT; } skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_frag_page(nfrag), - rx.offset, rx.status, PAGE_SIZE); + rx->offset, rx->status, PAGE_SIZE); skb_shinfo(nskb)->nr_frags = 0; kfree_skb(nskb); } - xennet_set_rx_rsp_cons(queue, cons); + queue->rx.rsp_cons = cons; return 0; } @@ -1052,19 +1003,12 @@ static int xennet_poll(struct napi_struct *napi, int budget) skb_queue_head_init(&tmpq); rp = queue->rx.sring->rsp_prod; - if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) { - dev_alert(&dev->dev, "Illegal number of responses %u\n", - rp - 
queue->rx.rsp_cons); - queue->info->broken = true; - spin_unlock(&queue->rx_lock); - return 0; - } rmb(); /* Ensure we see queued responses up to 'rp'. */ i = queue->rx.rsp_cons; work_done = 0; while ((i != rp) && (work_done < budget)) { - RING_COPY_RESPONSE(&queue->rx, i, rx); + memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); err = xennet_get_responses(queue, &rinfo, rp, &tmpq); @@ -1086,9 +1030,7 @@ err: if (unlikely(xennet_set_skb_gso(skb, gso))) { __skb_queue_head(&tmpq, skb); - xennet_set_rx_rsp_cons(queue, - queue->rx.rsp_cons + - skb_queue_len(&tmpq)); + queue->rx.rsp_cons += skb_queue_len(&tmpq); goto err; } } @@ -1112,8 +1054,7 @@ err: __skb_queue_tail(&rxq, skb); - i = queue->rx.rsp_cons + 1; - xennet_set_rx_rsp_cons(queue, i); + i = ++queue->rx.rsp_cons; work_done++; } @@ -1191,18 +1132,17 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue) for (i = 0; i < NET_TX_RING_SIZE; i++) { /* Skip over entries which are actually freelist references */ - if (!queue->tx_skbs[i]) + if (skb_entry_is_link(&queue->tx_skbs[i])) continue; - skb = queue->tx_skbs[i]; - queue->tx_skbs[i] = NULL; + skb = queue->tx_skbs[i].skb; get_page(queue->grant_tx_page[i]); gnttab_end_foreign_access(queue->grant_tx_ref[i], GNTMAP_readonly, (unsigned long)page_address(queue->grant_tx_page[i])); queue->grant_tx_page[i] = NULL; queue->grant_tx_ref[i] = GRANT_INVALID_REF; - add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i); + add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i); dev_kfree_skb_irq(skb); } } @@ -1297,79 +1237,34 @@ static int xennet_set_features(struct net_device *dev, return 0; } -static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi) +static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) { + struct netfront_queue *queue = dev_id; unsigned long flags; - if (unlikely(queue->info->broken)) - return false; - spin_lock_irqsave(&queue->tx_lock, flags); - if 
(xennet_tx_buf_gc(queue)) - *eoi = 0; + xennet_tx_buf_gc(queue); spin_unlock_irqrestore(&queue->tx_lock, flags); - return true; -} - -static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) -{ - unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; - - if (likely(xennet_handle_tx(dev_id, &eoiflag))) - xen_irq_lateeoi(irq, eoiflag); - return IRQ_HANDLED; } -static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi) -{ - unsigned int work_queued; - unsigned long flags; - - if (unlikely(queue->info->broken)) - return false; - - spin_lock_irqsave(&queue->rx_cons_lock, flags); - work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); - if (work_queued > queue->rx_rsp_unconsumed) { - queue->rx_rsp_unconsumed = work_queued; - *eoi = 0; - } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) { - const struct device *dev = &queue->info->netdev->dev; - - spin_unlock_irqrestore(&queue->rx_cons_lock, flags); - dev_alert(dev, "RX producer index going backwards\n"); - dev_alert(dev, "Disabled for further use\n"); - queue->info->broken = true; - return false; - } - spin_unlock_irqrestore(&queue->rx_cons_lock, flags); - - if (likely(netif_carrier_ok(queue->info->netdev) && work_queued)) - napi_schedule(&queue->napi); - - return true; -} - static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) { - unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; + struct netfront_queue *queue = dev_id; + struct net_device *dev = queue->info->netdev; - if (likely(xennet_handle_rx(dev_id, &eoiflag))) - xen_irq_lateeoi(irq, eoiflag); + if (likely(netif_carrier_ok(dev) && + RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) + napi_schedule(&queue->napi); return IRQ_HANDLED; } static irqreturn_t xennet_interrupt(int irq, void *dev_id) { - unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; - - if (xennet_handle_tx(dev_id, &eoiflag) && - xennet_handle_rx(dev_id, &eoiflag)) - xen_irq_lateeoi(irq, eoiflag); - + xennet_tx_interrupt(irq, dev_id); + xennet_rx_interrupt(irq, dev_id); return 
IRQ_HANDLED; } @@ -1380,10 +1275,6 @@ static void xennet_poll_controller(struct net_device *dev) struct netfront_info *info = netdev_priv(dev); unsigned int num_queues = dev->real_num_tx_queues; unsigned int i; - - if (info->broken) - return; - for (i = 0; i < num_queues; ++i) xennet_interrupt(0, &info->queues[i]); } @@ -1563,10 +1454,6 @@ static int netfront_resume(struct xenbus_device *dev) dev_dbg(&dev->dev, "%s\n", dev->nodename); - netif_tx_lock_bh(info->netdev); - netif_device_detach(info->netdev); - netif_tx_unlock_bh(info->netdev); - xennet_disconnect_backend(info); return 0; } @@ -1601,10 +1488,9 @@ static int setup_netfront_single(struct netfront_queue *queue) if (err < 0) goto fail; - err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, - xennet_interrupt, 0, - queue->info->netdev->name, - queue); + err = bind_evtchn_to_irqhandler(queue->tx_evtchn, + xennet_interrupt, + 0, queue->info->netdev->name, queue); if (err < 0) goto bind_fail; queue->rx_evtchn = queue->tx_evtchn; @@ -1632,18 +1518,18 @@ static int setup_netfront_split(struct netfront_queue *queue) snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), "%s-tx", queue->name); - err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, - xennet_tx_interrupt, 0, - queue->tx_irq_name, queue); + err = bind_evtchn_to_irqhandler(queue->tx_evtchn, + xennet_tx_interrupt, + 0, queue->tx_irq_name, queue); if (err < 0) goto bind_tx_fail; queue->tx_irq = err; snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), "%s-rx", queue->name); - err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn, - xennet_rx_interrupt, 0, - queue->rx_irq_name, queue); + err = bind_evtchn_to_irqhandler(queue->rx_evtchn, + xennet_rx_interrupt, + 0, queue->rx_irq_name, queue); if (err < 0) goto bind_rx_fail; queue->rx_irq = err; @@ -1745,7 +1631,6 @@ static int xennet_init_queue(struct netfront_queue *queue) spin_lock_init(&queue->tx_lock); spin_lock_init(&queue->rx_lock); - spin_lock_init(&queue->rx_cons_lock); 
setup_timer(&queue->rx_refill_timer, rx_refill_timeout, (unsigned long)queue); @@ -1754,15 +1639,13 @@ static int xennet_init_queue(struct netfront_queue *queue) snprintf(queue->name, sizeof(queue->name), "vif%s-q%u", devid, queue->id); - /* Initialise tx_skb_freelist as a free chain containing every entry. */ + /* Initialise tx_skbs as a free chain containing every entry. */ queue->tx_skb_freelist = 0; - queue->tx_pend_queue = TX_LINK_NONE; for (i = 0; i < NET_TX_RING_SIZE; i++) { - queue->tx_link[i] = i + 1; + skb_entry_set_link(&queue->tx_skbs[i], i+1); queue->grant_tx_ref[i] = GRANT_INVALID_REF; queue->grant_tx_page[i] = NULL; } - queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE; /* Clear out rx_skbs */ for (i = 0; i < NET_RX_RING_SIZE; i++) { @@ -1972,9 +1855,6 @@ static int talk_to_netback(struct xenbus_device *dev, if (info->queues) xennet_destroy_queues(info); - /* For the case of a reconnect reset the "broken" indicator. */ - info->broken = false; - err = xennet_create_queues(info, &num_queues); if (err < 0) { xenbus_dev_fatal(dev, err, "creating queues"); @@ -2134,10 +2014,6 @@ static int xennet_connect(struct net_device *dev) * domain a kick because we've probably just requeued some * packets. 
*/ - netif_tx_lock_bh(np->netdev); - netif_device_attach(np->netdev); - netif_tx_unlock_bh(np->netdev); - netif_carrier_on(np->netdev); for (j = 0; j < num_queues; ++j) { queue = &np->queues[j]; diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c index 9e0d74a08d58..bb3d5ea9869c 100644 --- a/drivers/nfc/pn533.c +++ b/drivers/nfc/pn533.c @@ -1250,9 +1250,6 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a, if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0) return false; - if (type_a->nfcid_len > NFC_NFCID1_MAXSIZE) - return false; - return true; } @@ -2592,7 +2589,7 @@ static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb) frag = pn533_alloc_skb(dev, frag_size); if (!frag) { skb_queue_purge(&dev->fragment_skb); - return -ENOMEM; + break; } if (!dev->tgt_mode) { @@ -2662,7 +2659,7 @@ static int pn533_transceive(struct nfc_dev *nfc_dev, /* jumbo frame ? */ if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) { rc = pn533_fill_fragment_skbs(dev, skb); - if (rc < 0) + if (rc <= 0) goto error; skb = skb_dequeue(&dev->fragment_skb); @@ -2734,7 +2731,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb) /* let's split in multiple chunks if size's too big */ if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) { rc = pn533_fill_fragment_skbs(dev, skb); - if (rc < 0) + if (rc <= 0) goto error; /* get the first skb */ diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c index 3ded31873d11..3ffbed72adf7 100644 --- a/drivers/nfc/port100.c +++ b/drivers/nfc/port100.c @@ -936,11 +936,11 @@ static u64 port100_get_command_type_mask(struct port100 *dev) skb = port100_alloc_skb(dev, 0); if (!skb) - return 0; + return -ENOMEM; resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb); if (IS_ERR(resp)) - return 0; + return PTR_ERR(resp); if (resp->len < 8) mask = 0; diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c index b387845d3c72..64a90252c57f 100644 --- 
a/drivers/nfc/s3fwrn5/firmware.c +++ b/drivers/nfc/s3fwrn5/firmware.c @@ -304,10 +304,8 @@ static int s3fwrn5_fw_request_firmware(struct s3fwrn5_fw_info *fw_info) if (ret < 0) return ret; - if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE) { - release_firmware(fw->fw); + if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE) return -EINVAL; - } memcpy(fw->date, fw->fw->data + 0x00, 12); fw->date[12] = '\0'; diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 058783178677..651b8d19d324 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -303,16 +303,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(state); -static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf) +static ssize_t available_slots_show(struct device *dev, + struct device_attribute *attr, char *buf) { - struct device *dev; + struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); ssize_t rc; u32 nfree; if (!ndd) return -ENXIO; - dev = ndd->dev; nvdimm_bus_lock(dev); nfree = nd_label_nfree(ndd); if (nfree - 1 > nfree) { @@ -324,18 +324,6 @@ static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf) nvdimm_bus_unlock(dev); return rc; } - -static ssize_t available_slots_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - ssize_t rc; - - device_lock(dev); - rc = __available_slots_show(dev_get_drvdata(dev), buf); - device_unlock(dev); - - return rc; -} static DEVICE_ATTR_RO(available_slots); static struct attribute *nvdimm_attributes[] = { diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 5fab68838651..fc870e55bb66 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -27,6 +27,7 @@ enum { * BTT instance */ ND_MAX_LANES = 256, + SECTOR_SHIFT = 9, INT_LBASIZE_ALIGNMENT = 64, #if IS_ENABLED(CONFIG_NVDIMM_PFN) ND_PFN_ALIGN = PAGES_PER_SECTION * PAGE_SIZE, diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 0b8b2ee65219..5d6d1bb4f110 
100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -815,8 +815,7 @@ static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, *p-- = 0; /* clear msb bits if any leftover in the last byte */ - if (cell->nbits % BITS_PER_BYTE) - *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0); + *p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0); } static int __nvmem_cell_read(struct nvmem_device *nvmem, diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 5d7072287e6d..cc3708ea8084 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -998,7 +998,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usg_calls++; #endif - while (nents && sg_dma_len(sglist)) { + while(sg_dma_len(sglist) && nents--) { #ifdef CCIO_COLLECT_STATS ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; @@ -1006,7 +1006,6 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ccio_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction); ++sglist; - nents--; } DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 88e760c88aba..8524faf28acb 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -160,6 +160,15 @@ struct dino_device (struct dino_device *)__pdata; }) +/* Check if PCI device is behind a Card-mode Dino. */ +static int pci_dev_is_behind_card_dino(struct pci_dev *dev) +{ + struct dino_device *dino_dev; + + dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge)); + return is_card_dino(&dino_dev->hba.dev->id); +} + /* * Dino Configuration Space Accessor Functions */ @@ -443,15 +452,6 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev) DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus ); #ifdef CONFIG_TULIP -/* Check if PCI device is behind a Card-mode Dino. 
*/ -static int pci_dev_is_behind_card_dino(struct pci_dev *dev) -{ - struct dino_device *dino_dev; - - dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge)); - return is_card_dino(&dino_dev->hba.dev->id); -} - static void pci_fixup_tulip(struct pci_dev *dev) { if (!pci_dev_is_behind_card_dino(dev)) diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index 1b4aacf2ff9a..3651c3871d5b 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c @@ -992,10 +992,8 @@ pdcs_register_pathentries(void) entry->kobj.kset = paths_kset; err = kobject_init_and_add(&entry->kobj, &ktype_pdcspath, NULL, "%s", entry->name); - if (err) { - kobject_put(&entry->kobj); + if (err) return err; - } /* kobject is now registered */ write_lock(&entry->rw_lock); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 02a45be166e8..b854de39c7ff 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1051,7 +1051,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, spin_unlock_irqrestore(&ioc->res_lock, flags); #endif - while (nents && sg_dma_len(sglist)) { + while (sg_dma_len(sglist) && nents--) { sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction); #ifdef SBA_COLLECT_STATS @@ -1059,7 +1059,6 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usingle_calls--; /* kluge since call is unmap_sg() */ #endif ++sglist; - nents--; } DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c index b6d808037045..2e21af43d91e 100644 --- a/drivers/parport/ieee1284_ops.c +++ b/drivers/parport/ieee1284_ops.c @@ -534,7 +534,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port, goto out; /* Yield the port for a while. 
*/ - if (dev->port->irq != PARPORT_IRQ_NONE) { + if (count && dev->port->irq != PARPORT_IRQ_NONE) { parport_release (dev); schedule_timeout_interruptible(msecs_to_jiffies(40)); parport_claim_or_block (dev); diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c index b6a099371ad2..a6456b578269 100644 --- a/drivers/pci/host/pci-xgene-msi.c +++ b/drivers/pci/host/pci-xgene-msi.c @@ -393,9 +393,13 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu) if (!msi_group->gic_irq) continue; - irq_set_chained_handler_and_data(msi_group->gic_irq, - xgene_msi_isr, msi_group); - + irq_set_chained_handler(msi_group->gic_irq, + xgene_msi_isr); + err = irq_set_handler_data(msi_group->gic_irq, msi_group); + if (err) { + pr_err("failed to register GIC IRQ handler\n"); + return -EINVAL; + } /* * Statically allocate MSI GIC IRQs to each CPU core. * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index d0f5c526c8e6..6727471ea5b4 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -538,7 +538,6 @@ static void enable_slot(struct acpiphp_slot *slot) slot->flags &= (~SLOT_ENABLED); continue; } - pci_dev_put(dev); } } diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c index ca9d832bd9f8..a796301ea03f 100644 --- a/drivers/pci/hotplug/rpadlpar_sysfs.c +++ b/drivers/pci/hotplug/rpadlpar_sysfs.c @@ -39,11 +39,12 @@ static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr, if (nbytes >= MAX_DRC_NAME_LEN) return 0; - strscpy(drc_name, buf, nbytes + 1); + memcpy(drc_name, buf, nbytes); end = strchr(drc_name, '\n'); - if (end) - *end = '\0'; + if (!end) + end = &drc_name[nbytes]; + *end = '\0'; rc = dlpar_add_slot(drc_name); if (rc) @@ -69,11 +70,12 @@ static ssize_t remove_slot_store(struct kobject *kobj, if (nbytes >= MAX_DRC_NAME_LEN) return 0; - strscpy(drc_name, buf, nbytes + 1); + 
memcpy(drc_name, buf, nbytes); end = strchr(drc_name, '\n'); - if (end) - *end = '\0'; + if (!end) + end = &drc_name[nbytes]; + *end = '\0'; rc = dlpar_remove_slot(drc_name); if (rc) diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 92e78547b00a..5d5e61d6c548 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -187,31 +187,24 @@ static inline __attribute_const__ u32 msi_mask(unsigned x) * reliably as devices without an INTx disable bit will then generate a * level IRQ which will never be cleared. */ -void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) +u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) { - raw_spinlock_t *lock = &desc->dev->msi_lock; - unsigned long flags; + u32 mask_bits = desc->masked; if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit) - return; + return 0; - raw_spin_lock_irqsave(lock, flags); - desc->masked &= ~mask; - desc->masked |= flag; + mask_bits &= ~mask; + mask_bits |= flag; pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos, - desc->masked); - raw_spin_unlock_irqrestore(lock, flags); -} + mask_bits); -static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) -{ - __pci_msi_desc_mask_irq(desc, mask, flag); + return mask_bits; } -static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) +static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) { - return desc->mask_base + - desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; + desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); } /* @@ -320,29 +313,13 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) if (dev->current_state != PCI_D0) { /* Don't touch the hardware now */ } else if (entry->msi_attrib.is_msix) { - void __iomem *base = pci_msix_desc_addr(entry); - bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT); - - /* - * The specification mandates that the entry is masked - * when the message is modified: - * - * "If software changes the Address or Data 
value of an - * entry while the entry is unmasked, the result is - * undefined." - */ - if (unmasked) - __pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT); + void __iomem *base; + base = entry->mask_base + + entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); writel(msg->data, base + PCI_MSIX_ENTRY_DATA); - - if (unmasked) - __pci_msix_desc_mask_irq(entry, 0); - - /* Ensure that the writes are visible in the device */ - readl(base + PCI_MSIX_ENTRY_DATA); } else { int pos = dev->msi_cap; u16 msgctl; @@ -363,8 +340,6 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) pci_write_config_word(dev, pos + PCI_MSI_DATA_32, msg->data); } - /* Ensure that the writes are visible in the device */ - pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl); } entry->msg = *msg; } @@ -390,6 +365,18 @@ static void free_msi_irqs(struct pci_dev *dev) for (i = 0; i < entry->nvec_used; i++) BUG_ON(irq_has_action(entry->irq + i)); + pci_msi_teardown_msi_irqs(dev); + + list_for_each_entry_safe(entry, tmp, msi_list, list) { + if (entry->msi_attrib.is_msix) { + if (list_is_last(&entry->list, msi_list)) + iounmap(entry->mask_base); + } + + list_del(&entry->list); + kfree(entry); + } + if (dev->msi_irq_groups) { sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups); msi_attrs = dev->msi_irq_groups[0]->attrs; @@ -405,18 +392,6 @@ static void free_msi_irqs(struct pci_dev *dev) kfree(dev->msi_irq_groups); dev->msi_irq_groups = NULL; } - - pci_msi_teardown_msi_irqs(dev); - - list_for_each_entry_safe(entry, tmp, msi_list, list) { - if (entry->msi_attrib.is_msix) { - if (list_is_last(&entry->list, msi_list)) - iounmap(entry->mask_base); - } - - list_del(&entry->list); - free_msi_entry(entry); - } } static void pci_intx_for_msi(struct pci_dev *dev, int enable) @@ -649,21 +624,21 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) /* 
Configure MSI capability structure */ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); if (ret) { - msi_mask_irq(entry, mask, 0); + msi_mask_irq(entry, mask, ~mask); free_msi_irqs(dev); return ret; } ret = msi_verify_entries(dev); if (ret) { - msi_mask_irq(entry, mask, 0); + msi_mask_irq(entry, mask, ~mask); free_msi_irqs(dev); return ret; } ret = populate_msi_sysfs(dev); if (ret) { - msi_mask_irq(entry, mask, 0); + msi_mask_irq(entry, mask, ~mask); free_msi_irqs(dev); return ret; } @@ -702,7 +677,6 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, struct msix_entry *entries, int nvec) { struct msi_desc *entry; - void __iomem *addr; int i; for (i = 0; i < nvec; i++) { @@ -723,36 +697,27 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, entry->mask_base = base; entry->nvec_used = 1; - addr = pci_msix_desc_addr(entry); - entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); } return 0; } -static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries) +static void msix_program_entries(struct pci_dev *dev, + struct msix_entry *entries) { struct msi_desc *entry; + int i = 0; for_each_pci_msi_entry(entry, dev) { - if (entries) { - entries->vector = entry->irq; - entries++; - } - } -} - -static void msix_mask_all(void __iomem *base, int tsize) -{ - u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT; - int i; - - if (pci_msi_ignore_mask) - return; + int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_VECTOR_CTRL; - for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE) - writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); + entries[i].vector = entry->irq; + entry->masked = readl(entry->mask_base + offset); + msix_mask_irq(entry, 1); + i++; + } } /** @@ -768,33 +733,22 @@ static void msix_mask_all(void __iomem *base, int tsize) static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, int nvec) { - void __iomem 
*base; - int ret, tsize; + int ret; u16 control; + void __iomem *base; - /* - * Some devices require MSI-X to be enabled before the MSI-X - * registers can be accessed. Mask all the vectors to prevent - * interrupts coming in before they're fully set up. - */ - pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL | - PCI_MSIX_FLAGS_ENABLE); + /* Ensure MSI-X is disabled while it is set up */ + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); /* Request & Map MSI-X table region */ - tsize = msix_table_size(control); - base = msix_map_region(dev, tsize); - if (!base) { - ret = -ENOMEM; - goto out_disable; - } - - /* Ensure that all table entries are masked. */ - msix_mask_all(base, tsize); + base = msix_map_region(dev, msix_table_size(control)); + if (!base) + return -ENOMEM; ret = msix_setup_entries(dev, base, entries, nvec); if (ret) - goto out_disable; + return ret; ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); if (ret) @@ -805,7 +759,15 @@ static int msix_capability_init(struct pci_dev *dev, if (ret) goto out_free; - msix_update_entries(dev, entries); + /* + * Some devices require MSI-X to be enabled before we can touch the + * MSI-X registers. We need to mask all the vectors to prevent + * interrupts coming in before they're fully set up. 
+ */ + pci_msix_clear_and_set_ctrl(dev, 0, + PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); + + msix_program_entries(dev, entries); ret = populate_msi_sysfs(dev); if (ret) @@ -839,9 +801,6 @@ out_avail: out_free: free_msi_irqs(dev); -out_disable: - pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0); - return ret; } @@ -929,7 +888,8 @@ void pci_msi_shutdown(struct pci_dev *dev) /* Return the device with MSI unmasked as initial states */ mask = msi_mask(desc->msi_attrib.multi_cap); - msi_mask_irq(desc, mask, 0); + /* Keep cached state to be restored */ + __pci_msi_desc_mask_irq(desc, mask, ~mask); /* Restore dev->irq to its default pin-assertion irq */ dev->irq = desc->msi_attrib.default_irq; @@ -1026,8 +986,10 @@ void pci_msix_shutdown(struct pci_dev *dev) return; /* Return the device with MSI-X masked as initial states */ - for_each_pci_msi_entry(entry, dev) + for_each_pci_msi_entry(entry, dev) { + /* Keep cached states to be restored */ __pci_msix_desc_mask_irq(entry, 1); + } pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); pci_intx_for_msi(dev, 1); diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index 7d200a88cd74..024b5c179348 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c @@ -157,7 +157,7 @@ static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf) len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer, obj->buffer.length, UTF16_LITTLE_ENDIAN, - buf, PAGE_SIZE - 1); + buf, PAGE_SIZE); buf[len] = '\n'; } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index f4021636de0a..8a12b5392b8f 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1334,7 +1334,11 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) * so that things like MSI message writing will behave as expected * (e.g. if the device really is in D0 at enable time). 
*/ - pci_update_current_state(dev, dev->current_state); + if (dev->pm_cap) { + u16 pmcsr; + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); + dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); + } if (atomic_inc_return(&dev->enable_cnt) > 1) return 0; /* already enabled */ @@ -1872,14 +1876,7 @@ int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, if (enable) { int error; - /* - * Enable PME signaling if the device can signal PME from - * D3cold regardless of whether or not it can signal PME from - * the current target state, because that will allow it to - * signal PME when the hierarchy above it goes into D3cold and - * the device itself ends up in D3cold as a result of that. - */ - if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold)) + if (pci_pme_capable(dev, state)) pci_pme_active(dev, true); else ret = 1; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 63c62e2c8c0d..becedabff141 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1668,7 +1668,6 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) pci_set_of_node(dev); if (pci_setup_device(dev)) { - pci_release_of_node(dev); pci_bus_put(dev->bus); kfree(dev); return NULL; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 518383e5cb6d..bdaeccafa261 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2915,13 +2915,12 @@ static void fixup_mpss_256(struct pci_dev *dev) { dev->pcie_mpss = 1; /* 256 bytes */ } -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE, - PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE, - PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE, - PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256); -DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, + PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256); 
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, + PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, + PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256); /* Intel 5000 and 5100 Memory controllers have an errata with read completion * coalescing (which is enabled by default on some BIOSes) and MPS of 256B. @@ -3145,18 +3144,6 @@ static void quirk_no_bus_reset(struct pci_dev *dev) dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; } -/* - * Some NVIDIA GPU devices do not work with bus reset, SBR needs to be - * prevented for those affected devices. - */ -static void quirk_nvidia_no_bus_reset(struct pci_dev *dev) -{ - if ((dev->device & 0xffc0) == 0x2340) - quirk_no_bus_reset(dev); -} -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, - quirk_nvidia_no_bus_reset); - /* * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset. * The device will throw a Link Down error on AER-capable systems and @@ -3169,17 +3156,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003e, quirk_no_bus_reset); - -/* - * Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS - * automatically disables LTSSM when Secondary Bus Reset is received and - * the device stops working. Prevent bus reset for these devices. With - * this change, the device can be assigned to VMs with VFIO, but it will - * leak state between VMs. 
Reference - * https://e2e.ti.com/support/processors/f/791/t/954382 - */ -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset); static void quirk_no_pm_reset(struct pci_dev *dev) { @@ -3657,9 +3633,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123, quirk_dma_func1_alias); -/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c136 */ -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9125, - quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ @@ -3676,9 +3649,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a, /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0, quirk_dma_func1_alias); -/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c135 */ -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9215, - quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220, quirk_dma_func1_alias); diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index 14d84d5a0f58..01a343ad7155 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -307,9 +307,6 @@ placeholder: goto err; } - INIT_LIST_HEAD(&slot->list); - list_add(&slot->list, &parent->slots); - err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, "%s", slot_name); if (err) { @@ -317,6 +314,9 @@ placeholder: goto err; } + INIT_LIST_HEAD(&slot->list); + list_add(&slot->list, &parent->slots); + down_read(&pci_bus_sem); list_for_each_entry(dev, &parent->devices, bus_list) if (PCI_SLOT(dev->devfn) == slot_nr) diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c index f602176eb8b0..b91c4da68365 100644 --- a/drivers/pci/syscall.c +++ b/drivers/pci/syscall.c @@ -21,12 +21,10 @@ SYSCALL_DEFINE5(pciconfig_read, 
unsigned long, bus, unsigned long, dfn, u16 word; u32 dword; long err; - int cfg_ret; + long cfg_ret; - err = -EPERM; - dev = NULL; if (!capable(CAP_SYS_ADMIN)) - goto error; + return -EPERM; err = -ENODEV; dev = pci_get_bus_and_slot(bus, dfn); @@ -49,7 +47,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn, } err = -EIO; - if (cfg_ret) + if (cfg_ret != PCIBIOS_SUCCESSFUL) goto error; switch (len) { @@ -107,7 +105,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, if (err) break; err = pci_user_write_config_byte(dev, off, byte); - if (err) + if (err != PCIBIOS_SUCCESSFUL) err = -EIO; break; @@ -116,7 +114,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, if (err) break; err = pci_user_write_config_word(dev, off, word); - if (err) + if (err != PCIBIOS_SUCCESSFUL) err = -EIO; break; @@ -125,7 +123,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, if (err) break; err = pci_user_write_config_dword(dev, off, dword); - if (err) + if (err != PCIBIOS_SUCCESSFUL) err = -EIO; break; diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index a92cbc952b70..c3b615c94b4b 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c @@ -665,16 +665,18 @@ static int pccardd(void *__skt) if (events || sysfs_events) continue; - set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) break; + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + /* make sure we are running */ + __set_current_state(TASK_RUNNING); + try_to_freeze(); } - /* make sure we are running before we exit */ - __set_current_state(TASK_RUNNING); /* shut down socket, if a device is still present */ if (skt->state & SOCKET_PRESENT) { diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c index ba13e3c3d6b8..aae7e6df99cd 100644 --- a/drivers/pcmcia/i82092.c +++ b/drivers/pcmcia/i82092.c @@ -105,7 +105,6 @@ static int i82092aa_pci_probe(struct pci_dev *dev, const struct pci_device_id *i for (i 
= 0;iio_db; @@ -815,9 +812,6 @@ static struct resource *nonstatic_find_mem_region(u_long base, u_long num, unsigned long min, max; int ret, i, j; - if (!res) - return NULL; - low = low || !(s->features & SS_CAP_PAGE_REGS); data.mask = align - 1; diff --git a/drivers/phy/phy-dm816x-usb.c b/drivers/phy/phy-dm816x-usb.c index 908b5ff0e888..b4bbef664d20 100644 --- a/drivers/phy/phy-dm816x-usb.c +++ b/drivers/phy/phy-dm816x-usb.c @@ -246,28 +246,19 @@ static int dm816x_usb_phy_probe(struct platform_device *pdev) pm_runtime_enable(phy->dev); generic_phy = devm_phy_create(phy->dev, NULL, &ops); - if (IS_ERR(generic_phy)) { - error = PTR_ERR(generic_phy); - goto clk_unprepare; - } + if (IS_ERR(generic_phy)) + return PTR_ERR(generic_phy); phy_set_drvdata(generic_phy, phy); phy_provider = devm_of_phy_provider_register(phy->dev, of_phy_simple_xlate); - if (IS_ERR(phy_provider)) { - error = PTR_ERR(phy_provider); - goto clk_unprepare; - } + if (IS_ERR(phy_provider)) + return PTR_ERR(phy_provider); usb_add_phy_dev(&phy->phy); return 0; - -clk_unprepare: - pm_runtime_disable(phy->dev); - clk_unprepare(phy->refclk); - return error; } static int dm816x_usb_phy_remove(struct platform_device *pdev) diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index 168780eb29aa..f96065a81d1e 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c @@ -753,7 +753,7 @@ static int twl4030_usb_remove(struct platform_device *pdev) usb_remove_phy(&twl->phy); pm_runtime_get_sync(twl->dev); - cancel_delayed_work_sync(&twl->id_workaround_work); + cancel_delayed_work(&twl->id_workaround_work); device_remove_file(twl->dev, &dev_attr_vbus); /* set transceiver mode to power on defaults */ diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 43440b6b85bc..a3b27856b9e9 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -399,6 +399,7 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int 
type) pin_reg &= ~BIT(LEVEL_TRIG_OFF); pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF; + pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; irq_set_handler_locked(d, handle_edge_irq); break; @@ -406,6 +407,7 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) pin_reg &= ~BIT(LEVEL_TRIG_OFF); pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF; + pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; irq_set_handler_locked(d, handle_edge_irq); break; @@ -413,6 +415,7 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) pin_reg &= ~BIT(LEVEL_TRIG_OFF); pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF; + pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; irq_set_handler_locked(d, handle_edge_irq); break; @@ -420,6 +423,8 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF; pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF; + pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF); + pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF; irq_set_handler_locked(d, handle_level_irq); break; @@ -427,6 +432,8 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF; pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF); pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF; + pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF); + pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF; irq_set_handler_locked(d, handle_level_irq); break; diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c index adcdb0585d39..0b0fc2eb48e0 100644 --- a/drivers/pinctrl/pinctrl-falcon.c +++ b/drivers/pinctrl/pinctrl-falcon.c @@ -438,28 +438,24 @@ static int pinctrl_falcon_probe(struct platform_device *pdev) /* load and remap the pad resources of the 
different banks */ for_each_compatible_node(np, NULL, "lantiq,pad-falcon") { + struct platform_device *ppdev = of_find_device_by_node(np); const __be32 *bank = of_get_property(np, "lantiq,bank", NULL); struct resource res; - struct platform_device *ppdev; u32 avail; int pins; if (!of_device_is_available(np)) continue; - if (!bank || *bank >= PORTS) - continue; - if (of_address_to_resource(np, 0, &res)) - continue; - - ppdev = of_find_device_by_node(np); if (!ppdev) { dev_err(&pdev->dev, "failed to find pad pdev\n"); continue; } - + if (!bank || *bank >= PORTS) + continue; + if (of_address_to_resource(np, 0, &res)) + continue; falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL); - put_device(&ppdev->dev); if (IS_ERR(falcon_info.clk[*bank])) { dev_err(&ppdev->dev, "failed to get clock\n"); return PTR_ERR(falcon_info.clk[*bank]); diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index 59f103c115cc..eba400df8215 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -1967,15 +1967,12 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev) static int __maybe_unused rockchip_pinctrl_resume(struct device *dev) { struct rockchip_pinctrl *info = dev_get_drvdata(dev); - int ret; + int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX, + rk3288_grf_gpio6c_iomux | + GPIO6C6_SEL_WRITE_ENABLE); - if (info->ctrl->type == RK3288) { - ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX, - rk3288_grf_gpio6c_iomux | - GPIO6C6_SEL_WRITE_ENABLE); - if (ret) - return ret; - } + if (ret) + return ret; return pinctrl_force_default(info->pctl_dev); } diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 9c6afaebc9cf..17714793c08e 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1328,7 +1328,6 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs, if (PCS_HAS_PINCONF) { dev_err(pcs->dev, 
"pinconf not supported\n"); - res = -ENOTSUPP; goto free_pingroups; } diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c index e16d82bb36a9..2b331d5b9e79 100644 --- a/drivers/platform/chrome/cros_ec_dev.c +++ b/drivers/platform/chrome/cros_ec_dev.c @@ -137,10 +137,6 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg) if (copy_from_user(&u_cmd, arg, sizeof(u_cmd))) return -EFAULT; - if ((u_cmd.outsize > EC_MAX_MSG_BYTES) || - (u_cmd.insize > EC_MAX_MSG_BYTES)) - return -EINVAL; - s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize), GFP_KERNEL); if (!s_cmd) diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index db2cd5994c2d..5c285f2b3a65 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -182,15 +182,6 @@ static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev, msg->insize = sizeof(struct ec_response_get_protocol_info); ret = send_command(ec_dev, msg); - /* - * Send command once again when timeout occurred. - * Fingerprint MCU (FPMCU) is restarted during system boot which - * introduces small window in which FPMCU won't respond for any - * messages sent by kernel. There is no need to wait before next - * attempt because we waited at least EC_MSG_DEADLINE_MS. 
- */ - if (ret == -ETIMEDOUT) - ret = send_command(ec_dev, msg); if (ret < 0) { dev_dbg(ec_dev->dev, @@ -320,8 +311,8 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev) ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE; ec_dev->max_passthru = 0; ec_dev->pkt_xfer = NULL; - ec_dev->din_size = EC_PROTO2_MSG_BYTES; - ec_dev->dout_size = EC_PROTO2_MSG_BYTES; + ec_dev->din_size = EC_MSG_BYTES; + ec_dev->dout_size = EC_MSG_BYTES; } else { /* * It's possible for a test to occur too early when diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c index 502c225ed1c2..20351dd8a492 100644 --- a/drivers/platform/msm/gsi/gsi.c +++ b/drivers/platform/msm/gsi/gsi.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2018, 2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -18,7 +18,6 @@ #include #include #include -#include #include "gsi.h" #include "gsi_reg.h" @@ -1606,7 +1605,7 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl, { struct gsi_chan_ctx *ctx; uint32_t val; - int res, size; + int res; int ee; enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE; uint8_t erindex; @@ -1651,8 +1650,9 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl, } memset(ctx, 0, sizeof(*ctx)); - size = (props->ring_len / props->re_size) * sizeof(void *); - user_data = kzalloc(size, GFP_KERNEL); + user_data = devm_kzalloc(gsi_ctx->dev, + (props->ring_len / props->re_size) * sizeof(void *), + GFP_KERNEL); if (user_data == NULL) { GSIERR("%s:%d gsi context not allocated\n", __func__, __LINE__); return -GSI_STATUS_RES_ALLOC_FAILURE; @@ -1675,14 +1675,14 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl, if (res == 0) { GSIERR("chan_hdl=%u timed out\n", props->ch_id); mutex_unlock(&gsi_ctx->mlock); - 
kfree(user_data); + devm_kfree(gsi_ctx->dev, user_data); return -GSI_STATUS_TIMED_OUT; } if (ctx->state != GSI_CHAN_STATE_ALLOCATED) { GSIERR("chan_hdl=%u allocation failed state=%d\n", props->ch_id, ctx->state); mutex_unlock(&gsi_ctx->mlock); - kfree(user_data); + devm_kfree(gsi_ctx->dev, user_data); return -GSI_STATUS_RES_ALLOC_FAILURE; } mutex_unlock(&gsi_ctx->mlock); @@ -2129,7 +2129,7 @@ int gsi_dealloc_channel(unsigned long chan_hdl) mutex_unlock(&gsi_ctx->mlock); - kfree(ctx->user_data); + devm_kfree(gsi_ctx->dev, ctx->user_data); ctx->allocated = false; if (ctx->evtr) atomic_dec(&ctx->evtr->chan_ref_cnt); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c index b2e876c26749..c29cbdf95057 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1065,10 +1065,7 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip, } else { list_add(&entry->link, &tbl->head_flt_rule_list); } - if (tbl->rule_cnt < IPA_RULE_CNT_MAX) - tbl->rule_cnt++; - else - return -EINVAL; + tbl->rule_cnt++; if (entry->rt_tbl) entry->rt_tbl->ref_cnt++; id = ipa_id_alloc(entry); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h index bf0b069f92bd..1c9eeb50d1cd 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -201,7 +201,6 @@ #define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 96 #define IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50 #define IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN 40 -#define IPA_RULE_CNT_MAX 512 struct ipa2_active_client_htable_entry { struct hlist_node list; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c index 736ac63e48de..78555729d78a 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -506,38 +506,6 @@ static int qmi_init_modem_send_sync_msg(void) resp.resp.error, "ipa_init_modem_driver_resp_msg_v01"); } -static int ipa_qmi_filter_request_ex_calc_length( - struct ipa_install_fltr_rule_req_msg_v01 *req) -{ - int len = 0; - /* caller should validate and send the req */ - /* instead of sending max length,the approximate length is calculated */ - len += ((sizeof(struct ipa_install_fltr_rule_req_msg_v01)) - - (QMI_IPA_MAX_FILTERS_V01 * - sizeof(struct ipa_filter_spec_type_v01)) - - (QMI_IPA_MAX_FILTERS_V01 * sizeof(uint32_t)) - - (QMI_IPA_MAX_FILTERS_V01 * - sizeof(struct ipa_filter_spec_ex_type_v01))); - - if (req->filter_spec_list_valid && - req->filter_spec_list_len > 0) { - len += sizeof(struct ipa_filter_spec_type_v01)* - req->filter_spec_list_len; - } - - if (req->xlat_filter_indices_list_valid && - req->xlat_filter_indices_list_len > 0) { - len += sizeof(uint32_t)*req->xlat_filter_indices_list_len; - } - - if (req->filter_spec_ex_list_valid && - req->filter_spec_ex_list_len > 0) { - len += 
sizeof(struct ipa_filter_spec_ex_type_v01)* - req->filter_spec_ex_list_len; - } - return len; - -} /* sending filter-install-request to modem*/ int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req) { @@ -606,9 +574,7 @@ int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req) } mutex_unlock(&ipa_qmi_lock); - req_desc.max_msg_len = ipa_qmi_filter_request_ex_calc_length(req); - IPAWANDBG("QMI send request length = %d\n", req_desc.max_msg_len); - + req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01; req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01; req_desc.ei_array = ipa_install_fltr_rule_req_msg_data_v01_ei; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c index abb7947b2a06..007f92bcee13 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1086,10 +1086,7 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, list_add_tail(&entry->link, &tbl->head_rt_rule_list); else list_add(&entry->link, &tbl->head_rt_rule_list); - if (tbl->rule_cnt < IPA_RULE_CNT_MAX) - tbl->rule_cnt++; - else - return -EINVAL; + tbl->rule_cnt++; if (entry->hdr) entry->hdr->ref_cnt++; else if (entry->proc_ctx) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c index f36687b44b8d..060b40a3acc6 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -824,10 +824,7 @@ static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl, { int id; - if (tbl->rule_cnt < IPA_RULE_CNT_MAX) - tbl->rule_cnt++; - else - return -EINVAL; + tbl->rule_cnt++; if (entry->rt_tbl) entry->rt_tbl->ref_cnt++; id = ipa3_id_alloc(entry); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 5c1e49435631..7691aa93d544 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -190,8 +190,6 @@ #define IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50 #define IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN 40 -#define IPA_RULE_CNT_MAX 512 - struct ipa3_active_client_htable_entry { struct hlist_node list; char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN]; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c index 473618e9bd6a..d8afb0c3becc 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c @@ -1000,10 +1000,7 @@ static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl, { int id; - if (tbl->rule_cnt < IPA_RULE_CNT_MAX) - tbl->rule_cnt++; - else - return -EINVAL; + tbl->rule_cnt++; if (entry->hdr) entry->hdr->ref_cnt++; else if (entry->proc_ctx) diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index e4f278fa6d69..5c169a837ebd 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -124,7 +124,6 @@ static const struct key_entry acer_wmi_keymap[] __initconst = { 
{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */ {KE_IGNORE, 0x81, {KEY_SLEEP} }, {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad Toggle */ - {KE_IGNORE, 0x84, {KEY_KBDILLUMTOGGLE} }, /* Automatic Keyboard background light toggle */ {KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} }, {KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} }, {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} }, @@ -229,7 +228,6 @@ static int mailled = -1; static int brightness = -1; static int threeg = -1; static int force_series; -static int force_caps = -1; static bool ec_raw_mode; static bool has_type_aa; static u16 commun_func_bitmap; @@ -239,13 +237,11 @@ module_param(mailled, int, 0444); module_param(brightness, int, 0444); module_param(threeg, int, 0444); module_param(force_series, int, 0444); -module_param(force_caps, int, 0444); module_param(ec_raw_mode, bool, 0444); MODULE_PARM_DESC(mailled, "Set initial state of Mail LED"); MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness"); MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware"); MODULE_PARM_DESC(force_series, "Force a different laptop series"); -MODULE_PARM_DESC(force_caps, "Force the capability bitmask to this value"); MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode"); struct acer_data { @@ -2153,7 +2149,7 @@ static int __init acer_wmi_init(void) } /* WMID always provides brightness methods */ interface->capability |= ACER_CAP_BRIGHTNESS; - } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa && force_caps == -1) { + } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa) { pr_err("No WMID device detection method found\n"); return -ENODEV; } @@ -2183,9 +2179,6 @@ static int __init acer_wmi_init(void) if (acpi_video_get_backlight_type() != acpi_backlight_vendor) interface->capability &= ~ACER_CAP_BRIGHTNESS; - if (force_caps != -1) - interface->capability = force_caps; - if (wmi_has_guid(WMID_GUID3)) { if (ec_raw_mode) { if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) { diff --git 
a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index a0f10ccdca3e..976efeb3f2ba 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -461,7 +461,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) } gmux_data->iostart = res->start; - gmux_data->iolen = resource_size(res); + gmux_data->iolen = res->end - res->start; if (gmux_data->iolen < GMUX_MIN_IO_LEN) { pr_err("gmux I/O region too small (%lu < %u)\n", diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index 411616e6cb74..10ce6cba4455 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -101,9 +101,6 @@ MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids); static int lis3lv02d_acpi_init(struct lis3lv02d *lis3) { struct acpi_device *dev = lis3->bus_priv; - if (!lis3->init_required) - return 0; - if (acpi_evaluate_object(dev->handle, METHOD_NAME__INI, NULL, NULL) != AE_OK) return -EINVAL; @@ -364,7 +361,6 @@ static int lis3lv02d_add(struct acpi_device *device) } /* call the core layer do its init */ - lis3_dev.init_required = true; ret = lis3lv02d_init_device(&lis3_dev); if (ret) return ret; @@ -377,11 +373,9 @@ static int lis3lv02d_add(struct acpi_device *device) INIT_WORK(&hpled_led.work, delayed_set_status_worker); ret = led_classdev_register(NULL, &hpled_led.led_classdev); if (ret) { - i8042_remove_filter(hp_accel_i8042_filter); lis3lv02d_joystick_disable(&lis3_dev); lis3lv02d_poweroff(&lis3_dev); flush_work(&hpled_led.work); - lis3lv02d_remove_fs(&lis3_dev); return ret; } @@ -414,27 +408,11 @@ static int lis3lv02d_suspend(struct device *dev) static int lis3lv02d_resume(struct device *dev) { - lis3_dev.init_required = false; - lis3lv02d_poweron(&lis3_dev); - return 0; -} - -static int lis3lv02d_restore(struct device *dev) -{ - lis3_dev.init_required = true; lis3lv02d_poweron(&lis3_dev); return 0; } -static const struct dev_pm_ops hp_accel_pm = { - .suspend = 
lis3lv02d_suspend, - .resume = lis3lv02d_resume, - .freeze = lis3lv02d_suspend, - .thaw = lis3lv02d_resume, - .poweroff = lis3lv02d_suspend, - .restore = lis3lv02d_restore, -}; - +static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume); #define HP_ACCEL_PM (&hp_accel_pm) #else #define HP_ACCEL_PM NULL diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 04cabcbd8aaa..f94b730540e2 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -188,7 +188,7 @@ static inline int busy_loop(struct intel_scu_ipc_dev *scu) return 0; } -/* Wait till ipc ioc interrupt is received or timeout in 10 HZ */ +/* Wait till ipc ioc interrupt is received or timeout in 3 HZ */ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu) { int status; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 466a0d0162c3..9180b24ba60a 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -1168,6 +1168,15 @@ static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk) return status; } +/* Query FW and update rfkill sw state for all rfkill switches */ +static void tpacpi_rfk_update_swstate_all(void) +{ + unsigned int i; + + for (i = 0; i < TPACPI_RFK_SW_MAX; i++) + tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[i]); +} + /* * Sync the HW-blocking state of all rfkill switches, * do notice it causes the rfkill core to schedule uevents @@ -3006,6 +3015,9 @@ static void tpacpi_send_radiosw_update(void) if (wlsw == TPACPI_RFK_RADIO_OFF) tpacpi_rfk_update_hwblock_state(true); + /* Sync sw blocking state */ + tpacpi_rfk_update_swstate_all(); + /* Sync hw blocking state last if it is hw-unblocked */ if (wlsw == TPACPI_RFK_RADIO_ON) tpacpi_rfk_update_hwblock_state(false); @@ -5754,7 +5766,6 @@ enum thermal_access_mode { enum { /* TPACPI_THERMAL_TPEC_* */ TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 
0..7 */ TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */ - TP_EC_FUNCREV = 0xEF, /* ACPI EC Functional revision */ TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */ TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */ @@ -5953,7 +5964,7 @@ static const struct attribute_group thermal_temp_input8_group = { static int __init thermal_init(struct ibm_init_struct *iibm) { - u8 t, ta1, ta2, ver = 0; + u8 t, ta1, ta2; int i; int acpi_tmp7; int res; @@ -5968,14 +5979,7 @@ static int __init thermal_init(struct ibm_init_struct *iibm) * 0x78-0x7F, 0xC0-0xC7. Registers return 0x00 for * non-implemented, thermal sensors return 0x80 when * not available - * The above rule is unfortunately flawed. This has been seen with - * 0xC2 (power supply ID) causing thermal control problems. - * The EC version can be determined by offset 0xEF and at least for - * version 3 the Lenovo firmware team confirmed that registers 0xC0-0xC7 - * are not thermal registers. */ - if (!acpi_ec_read(TP_EC_FUNCREV, &ver)) - pr_warn("Thinkpad ACPI EC unable to access EC version\n"); ta1 = ta2 = 0; for (i = 0; i < 8; i++) { @@ -5985,13 +5989,11 @@ static int __init thermal_init(struct ibm_init_struct *iibm) ta1 = 0; break; } - if (ver < 3) { - if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) { - ta2 |= t; - } else { - ta1 = 0; - break; - } + if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) { + ta2 |= t; + } else { + ta1 = 0; + break; } } if (ta1 == 0) { @@ -6007,12 +6009,9 @@ static int __init thermal_init(struct ibm_init_struct *iibm) thermal_read_mode = TPACPI_THERMAL_NONE; } } else { - if (ver >= 3) - thermal_read_mode = TPACPI_THERMAL_TPEC_8; - else - thermal_read_mode = - (ta2 != 0) ? - TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8; + thermal_read_mode = + (ta2 != 0) ? 
+ TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8; } } else if (acpi_tmp7) { if (tpacpi_is_ibm() && @@ -8594,7 +8593,7 @@ static int fan_write_cmd_level(const char *cmd, int *rc) if (strlencmp(cmd, "level auto") == 0) level = TP_EC_FAN_AUTO; - else if ((strlencmp(cmd, "level disengaged") == 0) || + else if ((strlencmp(cmd, "level disengaged") == 0) | (strlencmp(cmd, "level full-speed") == 0)) level = TP_EC_FAN_FULLSPEED; else if (sscanf(cmd, "level %d", &level) != 1) diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 974d4ac78d10..1ff95b5a429d 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -2448,7 +2448,6 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev) if (!dev->info_supported && !dev->system_event_supported) { pr_warn("No hotkey query interface found\n"); - error = -EINVAL; goto err_remove_filter; } diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index fb16c83900a0..eb391a281833 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -367,14 +367,7 @@ struct acpi_buffer *out) * the WQxx method failed - we should disable collection anyway. */ if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) { - /* - * Ignore whether this WCxx call succeeds or not since - * the previously executed WQxx method call might have - * succeeded, and returning the failing status code - * of this call would throw away the result of the WQxx - * call, potentially leaking memory. 
- */ - acpi_execute_simple_method(handle, wc_method, 0); + status = acpi_execute_simple_method(handle, wc_method, 0); } return status; diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c index 24732df01cf9..8f8044e1acf3 100644 --- a/drivers/power/ab8500_btemp.c +++ b/drivers/power/ab8500_btemp.c @@ -1186,7 +1186,6 @@ static const struct of_device_id ab8500_btemp_match[] = { { .compatible = "stericsson,ab8500-btemp", }, { }, }; -MODULE_DEVICE_TABLE(of, ab8500_btemp_match); static struct platform_driver ab8500_btemp_driver = { .probe = ab8500_btemp_probe, diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c index 1a7013ec0caf..e388171f4e58 100644 --- a/drivers/power/ab8500_charger.c +++ b/drivers/power/ab8500_charger.c @@ -409,14 +409,6 @@ disable_otp: static void ab8500_power_supply_changed(struct ab8500_charger *di, struct power_supply *psy) { - /* - * This happens if we get notifications or interrupts and - * the platform has been configured not to support one or - * other type of charging. - */ - if (!psy) - return; - if (di->autopower_cfg) { if (!di->usb.charger_connected && !di->ac.charger_connected && @@ -443,15 +435,7 @@ static void ab8500_charger_set_usb_connected(struct ab8500_charger *di, if (!connected) di->flags.vbus_drop_end = false; - /* - * Sometimes the platform is configured not to support - * USB charging and no psy has been created, but we still - * will get these notifications. 
- */ - if (di->usb_chg.psy) { - sysfs_notify(&di->usb_chg.psy->dev.kobj, NULL, - "present"); - } + sysfs_notify(&di->usb_chg.psy->dev.kobj, NULL, "present"); if (connected) { mutex_lock(&di->charger_attached_mutex); @@ -3756,7 +3740,6 @@ static const struct of_device_id ab8500_charger_match[] = { { .compatible = "stericsson,ab8500-charger", }, { }, }; -MODULE_DEVICE_TABLE(of, ab8500_charger_match); static struct platform_driver ab8500_charger_driver = { .probe = ab8500_charger_probe, diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c index c58b496ca05a..d91111200dde 100644 --- a/drivers/power/ab8500_fg.c +++ b/drivers/power/ab8500_fg.c @@ -3239,7 +3239,6 @@ static const struct of_device_id ab8500_fg_match[] = { { .compatible = "stericsson,ab8500-fg", }, { }, }; -MODULE_DEVICE_TABLE(of, ab8500_fg_match); static struct platform_driver ab8500_fg_driver = { .probe = ab8500_fg_probe, diff --git a/drivers/power/bq25890_charger.c b/drivers/power/bq25890_charger.c index faf2a6243567..f993a55cde20 100644 --- a/drivers/power/bq25890_charger.c +++ b/drivers/power/bq25890_charger.c @@ -521,12 +521,12 @@ static void bq25890_handle_state_change(struct bq25890_device *bq, if (!new_state->online) { /* power removed */ /* disable ADC */ - ret = bq25890_field_write(bq, F_CONV_RATE, 0); + ret = bq25890_field_write(bq, F_CONV_START, 0); if (ret < 0) goto error; } else if (!old_state.online) { /* power inserted */ /* enable ADC, to have control of charge current/voltage */ - ret = bq25890_field_write(bq, F_CONV_RATE, 1); + ret = bq25890_field_write(bq, F_CONV_START, 1); if (ret < 0) goto error; } diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c index 6656f847ed93..1ea5d1aa268b 100644 --- a/drivers/power/charger-manager.c +++ b/drivers/power/charger-manager.c @@ -1490,7 +1490,6 @@ static const struct of_device_id charger_manager_match[] = { }, {}, }; -MODULE_DEVICE_TABLE(of, charger_manager_match); static struct charger_desc 
*of_cm_parse_desc(struct device *dev) { diff --git a/drivers/power/generic-adc-battery.c b/drivers/power/generic-adc-battery.c index 86289f9da85a..fedc5818fab7 100644 --- a/drivers/power/generic-adc-battery.c +++ b/drivers/power/generic-adc-battery.c @@ -379,7 +379,7 @@ static int gab_remove(struct platform_device *pdev) } kfree(adc_bat->psy_desc.properties); - cancel_delayed_work_sync(&adc_bat->bat_work); + cancel_delayed_work(&adc_bat->bat_work); return 0; } diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c index 01ca03c809d0..da7a75f82489 100644 --- a/drivers/power/max17042_battery.c +++ b/drivers/power/max17042_battery.c @@ -246,10 +246,7 @@ static int max17042_get_property(struct power_supply *psy, val->intval = data * 625 / 8; break; case POWER_SUPPLY_PROP_CAPACITY: - if (chip->pdata->enable_current_sense) - ret = regmap_read(map, MAX17042_RepSOC, &data); - else - ret = regmap_read(map, MAX17042_VFSOC, &data); + ret = regmap_read(map, MAX17042_RepSOC, &data); if (ret < 0) return ret; @@ -647,7 +644,7 @@ static inline void max17042_override_por_values(struct max17042_chip *chip) struct max17042_config_data *config = chip->pdata->config_data; max17042_override_por(map, MAX17042_TGAIN, config->tgain); - max17042_override_por(map, MAX17042_TOFF, config->toff); + max17042_override_por(map, MAx17042_TOFF, config->toff); max17042_override_por(map, MAX17042_CGAIN, config->cgain); max17042_override_por(map, MAX17042_COFF, config->coff); @@ -755,8 +752,7 @@ static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off) regmap_read(map, MAX17042_RepSOC, &soc); soc >>= 8; soc_tr = (soc + off) << 8; - if (off < soc) - soc_tr |= soc - off; + soc_tr |= (soc - off); regmap_write(map, MAX17042_SALRT_Th, soc_tr); } @@ -764,12 +760,8 @@ static irqreturn_t max17042_thread_handler(int id, void *dev) { struct max17042_chip *chip = dev; u32 val; - int ret; - - ret = regmap_read(chip->regmap, MAX17042_STATUS, &val); - if (ret) - return 
IRQ_HANDLED; + regmap_read(chip->regmap, MAX17042_STATUS, &val); if ((val & STATUS_INTR_SOCMIN_BIT) || (val & STATUS_INTR_SOCMAX_BIT)) { dev_info(&chip->client->dev, "SOC threshold INTR\n"); diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c index 211e3472ab4a..5c3dd9e77dff 100644 --- a/drivers/power/power_supply_sysfs.c +++ b/drivers/power/power_supply_sysfs.c @@ -317,7 +317,6 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(enable_jeita_detection), POWER_SUPPLY_ATTR(allow_hvdcp3), POWER_SUPPLY_ATTR(max_pulse_allowed), - POWER_SUPPLY_ATTR(fg_reset_clock), POWER_SUPPLY_ATTR(skin_temp), POWER_SUPPLY_ATTR(smart_charging_activation), POWER_SUPPLY_ATTR(smart_charging_interruption), diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c index a44e3427fdeb..be3d81ff51cc 100644 --- a/drivers/power/reset/gpio-poweroff.c +++ b/drivers/power/reset/gpio-poweroff.c @@ -84,7 +84,6 @@ static const struct of_device_id of_gpio_poweroff_match[] = { { .compatible = "gpio-poweroff", }, {}, }; -MODULE_DEVICE_TABLE(of, of_gpio_poweroff_match); static struct platform_driver gpio_poweroff_driver = { .probe = gpio_poweroff_probe, diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c index ec54cff108b3..15fed9d8f871 100644 --- a/drivers/power/reset/ltc2952-poweroff.c +++ b/drivers/power/reset/ltc2952-poweroff.c @@ -169,8 +169,8 @@ static void ltc2952_poweroff_kill(void) static void ltc2952_poweroff_default(struct ltc2952_poweroff *data) { - data->wde_interval = ktime_set(0, 300L * NSEC_PER_MSEC); - data->trigger_delay = ktime_set(2, 500L * NSEC_PER_MSEC); + data->wde_interval = ktime_set(0, 300L*1E6L); + data->trigger_delay = ktime_set(2, 500L*1E6L); hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL); data->timer_trigger.function = ltc2952_poweroff_timer_trigger; diff --git a/drivers/power/rt5033_battery.c b/drivers/power/rt5033_battery.c 
index c9a58ed4dc9f..bcdd83048492 100644 --- a/drivers/power/rt5033_battery.c +++ b/drivers/power/rt5033_battery.c @@ -63,7 +63,7 @@ static int rt5033_battery_get_watt_prop(struct i2c_client *client, regmap_read(battery->regmap, regh, &msb); regmap_read(battery->regmap, regl, &lsb); - ret = ((msb << 4) + (lsb >> 4)) * 1250; + ret = ((msb << 4) + (lsb >> 4)) * 1250 / 1000; return ret; } diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c index 06b412c43aa7..0ffe5cd3abf6 100644 --- a/drivers/power/s3c_adc_battery.c +++ b/drivers/power/s3c_adc_battery.c @@ -392,7 +392,7 @@ static int s3c_adc_bat_remove(struct platform_device *pdev) gpio_free(pdata->gpio_charge_finished); } - cancel_delayed_work_sync(&bat_work); + cancel_delayed_work(&bat_work); if (pdata->exit) pdata->exit(); diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h index 9c87c018fa97..6cefd23afbf6 100644 --- a/drivers/power/supply/qcom/fg-core.h +++ b/drivers/power/supply/qcom/fg-core.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, 2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -576,5 +576,4 @@ extern void fg_circ_buf_clr(struct fg_circ_buf *); extern int fg_circ_buf_avg(struct fg_circ_buf *, int *); extern int fg_circ_buf_median(struct fg_circ_buf *, int *); extern int fg_lerp(const struct fg_pt *, size_t, s32, s32 *); -extern int fg_dma_mem_req(struct fg_chip *, bool); #endif diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c index 694e8f769516..8a949bfe61d0 100644 --- a/drivers/power/supply/qcom/fg-memif.c +++ b/drivers/power/supply/qcom/fg-memif.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, 2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. 
All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -746,64 +746,6 @@ out: return rc; } -int fg_dma_mem_req(struct fg_chip *chip, bool request) -{ - int ret, rc = 0, retry_count = RETRY_COUNT; - u8 val; - - if (request) { - /* configure for DMA access */ - rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip), - MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT, - MEM_ACCESS_REQ_BIT); - if (rc < 0) { - pr_err("failed to set mem_access bit rc=%d\n", rc); - return rc; - } - - rc = fg_masked_write(chip, MEM_IF_MEM_ARB_CFG(chip), - MEM_IF_ARB_REQ_BIT, MEM_IF_ARB_REQ_BIT); - if (rc < 0) { - pr_err("failed to set mem_arb bit rc=%d\n", rc); - goto release_mem; - } - - while (retry_count--) { - rc = fg_read(chip, MEM_IF_INT_RT_STS(chip), &val, 1); - if (rc < 0) { - pr_err("failed to set ima_rt_sts rc=%d\n", rc); - goto release_mem; - } - if (val & MEM_GNT_BIT) - break; - msleep(20); - } - if ((retry_count < 0) && !(val & MEM_GNT_BIT)) { - pr_err("failed to get memory access\n"); - rc = -ETIMEDOUT; - goto release_mem; - } - - return 0; - } - -release_mem: - /* Release access */ - rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip), - MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT, 0); - if (rc < 0) - pr_err("failed to reset mem_access bit rc = %d\n", rc); - - ret = fg_masked_write(chip, MEM_IF_MEM_ARB_CFG(chip), - MEM_IF_ARB_REQ_BIT, 0); - if (ret < 0) { - pr_err("failed to release mem_arb bit rc=%d\n", ret); - return ret; - } - - return rc; -} - int fg_ima_init(struct fg_chip *chip) { int rc; diff --git a/drivers/power/supply/qcom/fg-reg.h b/drivers/power/supply/qcom/fg-reg.h index 906792e1ed79..cd0b2fb4391f 100644 --- a/drivers/power/supply/qcom/fg-reg.h +++ b/drivers/power/supply/qcom/fg-reg.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, 2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -29,7 +29,6 @@ #define BATT_SOC_STS_CLR(chip) (chip->batt_soc_base + 0x4A) #define BATT_SOC_LOW_PWR_CFG(chip) (chip->batt_soc_base + 0x52) #define BATT_SOC_LOW_PWR_STS(chip) (chip->batt_soc_base + 0x56) -#define BATT_SOC_RST_CTRL0(chip) (chip->batt_soc_base + 0xBA) /* BATT_SOC_INT_RT_STS */ #define MSOC_EMPTY_BIT BIT(5) @@ -40,9 +39,6 @@ /* BATT_SOC_RESTART */ #define RESTART_GO_BIT BIT(0) -/* BCL_RESET */ -#define BCL_RESET_BIT BIT(2) - /* FG_BATT_INFO register definitions */ #define BATT_INFO_BATT_TEMP_STS(chip) (chip->batt_info_base + 0x06) #define BATT_INFO_SYS_BATT(chip) (chip->batt_info_base + 0x07) @@ -62,6 +58,7 @@ #define BATT_INFO_JEITA_COLD(chip) (chip->batt_info_base + 0x63) #define BATT_INFO_JEITA_HOT(chip) (chip->batt_info_base + 0x64) #define BATT_INFO_JEITA_TOO_HOT(chip) (chip->batt_info_base + 0x65) + /* only for v1.1 */ #define BATT_INFO_ESR_CFG(chip) (chip->batt_info_base + 0x69) /* starting from v2.0 */ @@ -98,8 +95,6 @@ #define BATT_INFO_IADC_MSB(chip) (chip->batt_info_base + 0xAF) #define BATT_INFO_TM_MISC(chip) (chip->batt_info_base + 0xE5) #define BATT_INFO_TM_MISC1(chip) (chip->batt_info_base + 0xE6) -#define BATT_INFO_PEEK_MUX1(chip) (chip->batt_info_base + 0xEB) -#define BATT_INFO_RDBACK(chip) (chip->batt_info_base + 0xEF) /* BATT_INFO_BATT_TEMP_STS */ #define JEITA_TOO_HOT_STS_BIT BIT(7) @@ -269,12 +264,8 @@ #define ESR_REQ_CTL_BIT BIT(1) #define ESR_REQ_CTL_EN_BIT BIT(0) -/* BATT_INFO_PEEK_MUX1 */ -#define PEEK_MUX1_BIT BIT(0) - /* FG_MEM_IF register and bit definitions */ #define MEM_IF_INT_RT_STS(chip) ((chip->mem_if_base) + 0x10) -#define MEM_IF_MEM_ARB_CFG(chip) ((chip->mem_if_base) + 0x40) #define MEM_IF_MEM_INTF_CFG(chip) ((chip->mem_if_base) + 0x50) #define MEM_IF_IMA_CTL(chip) ((chip->mem_if_base) + 0x51) #define MEM_IF_IMA_CFG(chip) ((chip->mem_if_base) + 0x52) @@ -295,7 +286,6 @@ /* 
MEM_IF_INT_RT_STS */ #define MEM_XCP_BIT BIT(1) -#define MEM_GNT_BIT BIT(2) /* MEM_IF_MEM_INTF_CFG */ #define MEM_ACCESS_REQ_BIT BIT(7) @@ -336,7 +326,4 @@ /* MEM_IF_DMA_CTL */ #define DMA_CLEAR_LOG_BIT BIT(0) - -/* MEM_IF_REQ */ -#define MEM_IF_ARB_REQ_BIT BIT(0) #endif diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c index f074ffe6c274..23dd9131d402 100644 --- a/drivers/power/supply/qcom/fg-util.c +++ b/drivers/power/supply/qcom/fg-util.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, 2020, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -420,7 +420,7 @@ int fg_write(struct fg_chip *chip, int addr, u8 *val, int len) return -ENXIO; mutex_lock(&chip->bus_lock); - sec_access = (addr & 0x00FF) >= 0xBA; + sec_access = (addr & 0x00FF) > 0xD0; if (sec_access) { rc = regmap_write(chip->regmap, (addr & 0xFF00) | 0xD0, 0xA5); if (rc < 0) { @@ -460,7 +460,7 @@ int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val) return -ENXIO; mutex_lock(&chip->bus_lock); - sec_access = (addr & 0x00FF) >= 0xBA; + sec_access = (addr & 0x00FF) > 0xD0; if (sec_access) { rc = regmap_write(chip->regmap, (addr & 0xFF00) | 0xD0, 0xA5); if (rc < 0) { diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c index b41c6c37d9d5..ffee016a6737 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen3.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c @@ -4602,9 +4602,6 @@ static int fg_psy_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_CC_STEP_SEL: pval->intval = chip->ttf.cc_step.sel; break; - case POWER_SUPPLY_PROP_FG_RESET_CLOCK: - pval->intval = 0; - break; #if defined(CONFIG_SOMC_CHARGER_EXTENSION) case POWER_SUPPLY_PROP_CHARGE_FULL_RAW: pval->intval = chip->charge_full_raw; @@ -4628,100 
+4625,6 @@ static int fg_psy_get_property(struct power_supply *psy, return 0; } -#define BCL_RESET_RETRY_COUNT 4 -static int fg_bcl_reset(struct fg_chip *chip) -{ - int i, ret, rc = 0; - u8 val, peek_mux; - bool success = false; - - /* Read initial value of peek mux1 */ - rc = fg_read(chip, BATT_INFO_PEEK_MUX1(chip), &peek_mux, 1); - if (rc < 0) { - pr_err("Error in writing peek mux1, rc=%d\n", rc); - return rc; - } - - val = 0x83; - rc = fg_write(chip, BATT_INFO_PEEK_MUX1(chip), &val, 1); - if (rc < 0) { - pr_err("Error in writing peek mux1, rc=%d\n", rc); - return rc; - } - - mutex_lock(&chip->sram_rw_lock); - for (i = 0; i < BCL_RESET_RETRY_COUNT; i++) { - rc = fg_dma_mem_req(chip, true); - if (rc < 0) { - pr_err("Error in locking memory, rc=%d\n", rc); - goto unlock; - } - - rc = fg_read(chip, BATT_INFO_RDBACK(chip), &val, 1); - if (rc < 0) { - pr_err("Error in reading rdback, rc=%d\n", rc); - goto release_mem; - } - - if (val & PEEK_MUX1_BIT) { - rc = fg_masked_write(chip, BATT_SOC_RST_CTRL0(chip), - BCL_RESET_BIT, BCL_RESET_BIT); - if (rc < 0) { - pr_err("Error in writing RST_CTRL0, rc=%d\n", - rc); - goto release_mem; - } - - rc = fg_dma_mem_req(chip, false); - if (rc < 0) - pr_err("Error in unlocking memory, rc=%d\n", - rc); - - /* Delay of 2ms */ - usleep_range(2000, 3000); - ret = fg_masked_write(chip, BATT_SOC_RST_CTRL0(chip), - BCL_RESET_BIT, 0); - if (ret < 0) - pr_err("Error in writing RST_CTRL0, rc=%d\n", - rc); - if (!rc && !ret) - success = true; - - goto unlock; - } else { - rc = fg_dma_mem_req(chip, false); - if (rc < 0) { - pr_err("Error in unlocking memory, rc=%d\n", - rc); - goto unlock; - } - success = false; - pr_err_ratelimited("PEEK_MUX1 not set retrying...\n"); - msleep(1000); - } - } - -release_mem: - rc = fg_dma_mem_req(chip, false); - if (rc < 0) - pr_err("Error in unlocking memory, rc=%d\n", rc); - -unlock: - ret = fg_write(chip, BATT_INFO_PEEK_MUX1(chip), &peek_mux, 1); - if (ret < 0) { - pr_err("Error in writing peek mux1, rc=%d\n", 
rc); - mutex_unlock(&chip->sram_rw_lock); - return ret; - } - - mutex_unlock(&chip->sram_rw_lock); - - if (!success) - return -EAGAIN; - else - return rc; -} - static int fg_psy_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *pval) @@ -4810,13 +4713,6 @@ static int fg_psy_set_property(struct power_supply *psy, return rc; } break; - case POWER_SUPPLY_PROP_FG_RESET_CLOCK: - rc = fg_bcl_reset(chip); - if (rc < 0) { - pr_err("Error in resetting BCL clock, rc=%d\n", rc); - return rc; - } - break; default: break; } @@ -4914,7 +4810,6 @@ static enum power_supply_property fg_psy_props[] = { POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, POWER_SUPPLY_PROP_CC_STEP, POWER_SUPPLY_PROP_CC_STEP_SEL, - POWER_SUPPLY_PROP_FG_RESET_CLOCK, #if defined(CONFIG_SOMC_CHARGER_EXTENSION) POWER_SUPPLY_PROP_CHARGE_FULL_RAW, POWER_SUPPLY_PROP_TIME_TO_CAP_LEARNING, diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c index 2d7618375662..8c3f5adf1bc6 100644 --- a/drivers/ps3/ps3stor_lib.c +++ b/drivers/ps3/ps3stor_lib.c @@ -201,7 +201,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler) dev->bounce_lpar = ps3_mm_phys_to_lpar(__pa(dev->bounce_buf)); dev->bounce_dma = dma_map_single(&dev->sbd.core, dev->bounce_buf, dev->bounce_size, DMA_BIDIRECTIONAL); - if (dma_mapping_error(&dev->sbd.core, dev->bounce_dma)) { + if (!dev->bounce_dma) { dev_err(&dev->sbd.core, "%s:%u: map DMA region failed\n", __func__, __LINE__); error = -ENODEV; diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c index a911325fc0b4..3aa22ae4d94c 100644 --- a/drivers/ptp/ptp_pch.c +++ b/drivers/ptp/ptp_pch.c @@ -698,7 +698,6 @@ static const struct pci_device_id pch_ieee1588_pcidev_id[] = { }, {0} }; -MODULE_DEVICE_TABLE(pci, pch_ieee1588_pcidev_id); static struct pci_driver pch_driver = { .name = KBUILD_MODNAME, diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c index 2d11ac277de8..6c6b44fd3f43 100644 --- 
a/drivers/pwm/pwm-spear.c +++ b/drivers/pwm/pwm-spear.c @@ -231,6 +231,10 @@ static int spear_pwm_probe(struct platform_device *pdev) static int spear_pwm_remove(struct platform_device *pdev) { struct spear_pwm_chip *pc = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < NUM_PWM; i++) + pwm_disable(&pc->chip.pwms[i]); /* clk was prepared in probe, hence unprepare it here */ clk_unprepare(pc->clk); diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index df235ac1a6b2..5cf4a97e0304 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -279,7 +279,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) static int axp20x_regulator_parse_dt(struct platform_device *pdev) { struct device_node *np, *regulators; - int ret = 0; + int ret; u32 dcdcfreq = 0; np = of_node_get(pdev->dev.parent->of_node); @@ -294,12 +294,13 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev) ret = axp20x_set_dcdc_freq(pdev, dcdcfreq); if (ret < 0) { dev_err(&pdev->dev, "Error setting dcdc frequency: %d\n", ret); + return ret; } + of_node_put(regulators); } - of_node_put(np); - return ret; + return 0; } static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 workmode) diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c index fa9cb7df79de..12a25b40e473 100644 --- a/drivers/regulator/da9052-regulator.c +++ b/drivers/regulator/da9052-regulator.c @@ -258,8 +258,7 @@ static int da9052_regulator_set_voltage_time_sel(struct regulator_dev *rdev, case DA9052_ID_BUCK3: case DA9052_ID_LDO2: case DA9052_ID_LDO3: - ret = DIV_ROUND_UP(abs(new_sel - old_sel) * info->step_uV, - 6250); + ret = (new_sel - old_sel) * info->step_uV / 6250; break; } diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 41de5f1331fe..86015b393dd5 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -343,10 +343,7 @@ static 
int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) min = t->time.tm_min; sec = t->time.tm_sec; - spin_lock_irq(&rtc_lock); rtc_control = CMOS_READ(RTC_CONTROL); - spin_unlock_irq(&rtc_lock); - if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { /* Writing 0xff means "don't care" or "match all". */ mon = (mon <= 12) ? bin2bcd(mon) : 0xff; diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c index 4f10cb1561cc..ffa69e1c9245 100644 --- a/drivers/rtc/rtc-proc.c +++ b/drivers/rtc/rtc-proc.c @@ -26,8 +26,8 @@ static bool is_rtc_hctosys(struct rtc_device *rtc) int size; char name[NAME_SIZE]; - size = snprintf(name, NAME_SIZE, "rtc%d", rtc->id); - if (size >= NAME_SIZE) + size = scnprintf(name, NAME_SIZE, "rtc%d", rtc->id); + if (size > NAME_SIZE) return false; return !strncmp(name, CONFIG_RTC_HCTOSYS_DEVICE, NAME_SIZE); diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c index f78360d99fb9..f42aa2b2dcba 100644 --- a/drivers/rtc/rtc-tps65910.c +++ b/drivers/rtc/rtc-tps65910.c @@ -332,6 +332,6 @@ static struct platform_driver tps65910_rtc_driver = { }; module_platform_driver(tps65910_rtc_driver); -MODULE_ALIAS("platform:tps65910-rtc"); +MODULE_ALIAS("platform:rtc-tps65910"); MODULE_AUTHOR("Venu Byravarasu "); MODULE_LICENSE("GPL"); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index aec25ea99d19..b76a85d14ef0 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -3286,6 +3286,8 @@ void dasd_generic_remove(struct ccw_device *cdev) struct dasd_device *device; struct dasd_block *block; + cdev->handler = NULL; + device = dasd_device_from_cdev(cdev); if (IS_ERR(device)) { dasd_remove_sysfs_files(cdev); @@ -3304,7 +3306,6 @@ void dasd_generic_remove(struct ccw_device *cdev) * no quite down yet. */ dasd_set_target_state(device, DASD_STATE_NEW); - cdev->handler = NULL; /* dasd_delete_device destroys the device reference. 
*/ block = device->block; dasd_delete_device(device); diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index 03543c0a2dd0..6601047d4b65 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c @@ -258,6 +258,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) return; device->discipline->get_uid(device, &uid); spin_lock_irqsave(&lcu->lock, flags); + list_del_init(&device->alias_list); /* make sure that the workers don't use this device */ if (device == lcu->suc_data.device) { spin_unlock_irqrestore(&lcu->lock, flags); @@ -284,7 +285,6 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device) spin_lock_irqsave(&aliastree.lock, flags); spin_lock(&lcu->lock); - list_del_init(&device->alias_list); if (list_empty(&lcu->grouplist) && list_empty(&lcu->active_devices) && list_empty(&lcu->inactive_devices)) { @@ -475,19 +475,11 @@ static int read_unit_address_configuration(struct dasd_device *device, spin_unlock_irqrestore(&lcu->lock, flags); rc = dasd_sleep_on(cqr); - if (!rc) - goto out; - - if (suborder_not_supported(cqr)) { - /* suborder not supported or device unusable for IO */ - rc = -EOPNOTSUPP; - } else { - /* IO failed but should be retried */ + if (rc && !suborder_not_supported(cqr)) { spin_lock_irqsave(&lcu->lock, flags); lcu->flags |= NEED_UAC_UPDATE; spin_unlock_irqrestore(&lcu->lock, flags); } -out: dasd_kfree_request(cqr, cqr->memdev); return rc; } @@ -645,7 +637,6 @@ int dasd_alias_add_device(struct dasd_device *device) } if (lcu->flags & UPDATE_PENDING) { list_move(&device->alias_list, &lcu->active_devices); - private->pavgroup = NULL; _schedule_lcu_update(lcu, device); } spin_unlock(&lcu->lock); diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 151c8df394a8..50597f9522fe 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -257,9 +257,6 @@ static ssize_t chp_status_write(struct device *dev, if (!num_args) return count; - /* Wait 
until previous actions have settled. */ - css_wait_for_slow_path(); - if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) { mutex_lock(&cp->lock); error = s390_vary_chpid(cp->chpid, 1); diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 276b2034cbda..f9d6a9f00640 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -769,6 +769,8 @@ int chsc_chp_vary(struct chp_id chpid, int on) { struct channel_path *chp = chpid_to_chp(chpid); + /* Wait until previous actions have settled. */ + css_wait_for_slow_path(); /* * Redo PathVerification on the devices the chpid connects to */ diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index fd622021748f..f7630cf581cd 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -518,8 +518,6 @@ static void zfcp_fc_adisc_handler(void *data) goto out; } - /* re-init to undo drop from zfcp_fc_adisc() */ - port->d_id = ntoh24(adisc_resp->adisc_port_id); /* port is good, unblock rport without going through erp */ zfcp_scsi_schedule_rport_register(port); out: @@ -533,7 +531,6 @@ static int zfcp_fc_adisc(struct zfcp_port *port) struct zfcp_fc_req *fc_req; struct zfcp_adapter *adapter = port->adapter; struct Scsi_Host *shost = adapter->scsi_host; - u32 d_id; int ret; fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC); @@ -558,15 +555,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port) fc_req->u.adisc.req.adisc_cmd = ELS_ADISC; hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost)); - d_id = port->d_id; /* remember as destination for send els below */ - /* - * Force fresh GID_PN lookup on next port recovery. - * Must happen after request setup and before sending request, - * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler(). 
- */ - port->d_id = 0; - - ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els, + ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els, ZFCP_FC_CTELS_TMO); if (ret) kmem_cache_free(zfcp_fc_req_cache, fc_req); diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index b9b4491d732a..c7be7bb37209 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -3081,11 +3081,11 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command, ccb->opcode = BLOGIC_INITIATOR_CCB_SG; ccb->datalen = count * sizeof(struct blogic_sg_seg); if (blogic_multimaster_type(adapter)) - ccb->data = (unsigned int) ccb->dma_handle + + ccb->data = (void *)((unsigned int) ccb->dma_handle + ((unsigned long) &ccb->sglist - - (unsigned long) ccb); + (unsigned long) ccb)); else - ccb->data = virt_to_32bit_virt(ccb->sglist); + ccb->data = ccb->sglist; scsi_for_each_sg(command, sg, count, i) { ccb->sglist[i].segbytes = sg_dma_len(sg); diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h index 5c950a7a1b1c..b53ec2f1e8cd 100644 --- a/drivers/scsi/BusLogic.h +++ b/drivers/scsi/BusLogic.h @@ -821,7 +821,7 @@ struct blogic_ccb { unsigned char cdblen; /* Byte 2 */ unsigned char sense_datalen; /* Byte 3 */ u32 datalen; /* Bytes 4-7 */ - u32 data; /* Bytes 8-11 */ + void *data; /* Bytes 8-11 */ unsigned char:8; /* Byte 12 */ unsigned char:8; /* Byte 13 */ enum blogic_adapter_status adapter_status; /* Byte 14 */ diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c index 4bca37d52bad..867b864f5047 100644 --- a/drivers/scsi/FlashPoint.c +++ b/drivers/scsi/FlashPoint.c @@ -40,7 +40,7 @@ struct sccb_mgr_info { u16 si_per_targ_ultra_nego; u16 si_per_targ_no_disc; u16 si_per_targ_wide_nego; - u16 si_mflags; + u16 si_flags; unsigned char si_card_family; unsigned char si_bustype; unsigned char si_card_model[3]; @@ -1070,22 +1070,22 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo) ScamFlg = (unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 
2); - pCardInfo->si_mflags = 0x0000; + pCardInfo->si_flags = 0x0000; if (i & 0x01) - pCardInfo->si_mflags |= SCSI_PARITY_ENA; + pCardInfo->si_flags |= SCSI_PARITY_ENA; if (!(i & 0x02)) - pCardInfo->si_mflags |= SOFT_RESET; + pCardInfo->si_flags |= SOFT_RESET; if (i & 0x10) - pCardInfo->si_mflags |= EXTENDED_TRANSLATION; + pCardInfo->si_flags |= EXTENDED_TRANSLATION; if (ScamFlg & SCAM_ENABLED) - pCardInfo->si_mflags |= FLAG_SCAM_ENABLED; + pCardInfo->si_flags |= FLAG_SCAM_ENABLED; if (ScamFlg & SCAM_LEVEL2) - pCardInfo->si_mflags |= FLAG_SCAM_LEVEL2; + pCardInfo->si_flags |= FLAG_SCAM_LEVEL2; j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L); if (i & 0x04) { @@ -1101,7 +1101,7 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo) if (!(RD_HARPOON(ioport + hp_page_ctrl) & NARROW_SCSI_CARD)) - pCardInfo->si_mflags |= SUPPORT_16TAR_32LUN; + pCardInfo->si_flags |= SUPPORT_16TAR_32LUN; pCardInfo->si_card_family = HARPOON_FAMILY; pCardInfo->si_bustype = BUSTYPE_PCI; @@ -1137,15 +1137,15 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo) if (pCardInfo->si_card_model[1] == '3') { if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7)) - pCardInfo->si_mflags |= LOW_BYTE_TERM; + pCardInfo->si_flags |= LOW_BYTE_TERM; } else if (pCardInfo->si_card_model[2] == '0') { temp = RD_HARPOON(ioport + hp_xfer_pad); WR_HARPOON(ioport + hp_xfer_pad, (temp & ~BIT(4))); if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7)) - pCardInfo->si_mflags |= LOW_BYTE_TERM; + pCardInfo->si_flags |= LOW_BYTE_TERM; WR_HARPOON(ioport + hp_xfer_pad, (temp | BIT(4))); if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7)) - pCardInfo->si_mflags |= HIGH_BYTE_TERM; + pCardInfo->si_flags |= HIGH_BYTE_TERM; WR_HARPOON(ioport + hp_xfer_pad, temp); } else { temp = RD_HARPOON(ioport + hp_ee_ctrl); @@ -1163,9 +1163,9 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo) WR_HARPOON(ioport + hp_ee_ctrl, temp); WR_HARPOON(ioport + hp_xfer_pad, temp2); if (!(temp3 & 
BIT(7))) - pCardInfo->si_mflags |= LOW_BYTE_TERM; + pCardInfo->si_flags |= LOW_BYTE_TERM; if (!(temp3 & BIT(6))) - pCardInfo->si_mflags |= HIGH_BYTE_TERM; + pCardInfo->si_flags |= HIGH_BYTE_TERM; } ARAM_ACCESS(ioport); @@ -1272,7 +1272,7 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info WR_HARPOON(ioport + hp_arb_id, pCardInfo->si_id); CurrCard->ourId = pCardInfo->si_id; - i = (unsigned char)pCardInfo->si_mflags; + i = (unsigned char)pCardInfo->si_flags; if (i & SCSI_PARITY_ENA) WR_HARPOON(ioport + hp_portctrl_1, (HOST_MODE8 | CHK_SCSI_P)); @@ -1286,14 +1286,14 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info j |= SCSI_TERM_ENA_H; WR_HARPOON(ioport + hp_ee_ctrl, j); - if (!(pCardInfo->si_mflags & SOFT_RESET)) { + if (!(pCardInfo->si_flags & SOFT_RESET)) { FPT_sresb(ioport, thisCard); FPT_scini(thisCard, pCardInfo->si_id, 0); } - if (pCardInfo->si_mflags & POST_ALL_UNDERRRUNS) + if (pCardInfo->si_flags & POST_ALL_UNDERRRUNS) CurrCard->globalFlags |= F_NO_FILTER; if (pCurrNvRam) { diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index 6efd17692a55..24e57e770432 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -3370,8 +3370,8 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost) shost->host_no); seq_printf(m, - " iop_base 0x%p, cable_detect: %X, err_code %u\n", - v->iop_base, + " iop_base 0x%lx, cable_detect: %X, err_code %u\n", + (unsigned long)v->iop_base, AdvReadWordRegister(iop_base,IOPW_SCSI_CFG1) & CABLE_DETECT, v->err_code); diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c index 9b5832b46dec..def3208dd290 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_core.c +++ b/drivers/scsi/aic7xxx/aic7xxx_core.c @@ -500,7 +500,7 @@ ahc_inq(struct ahc_softc *ahc, u_int port) return ((ahc_inb(ahc, port)) | (ahc_inb(ahc, port+1) << 8) | (ahc_inb(ahc, port+2) << 16) - | (((uint64_t)ahc_inb(ahc, port+3)) << 24) + | (ahc_inb(ahc, port+3) 
<< 24) | (((uint64_t)ahc_inb(ahc, port+4)) << 32) | (((uint64_t)ahc_inb(ahc, port+5)) << 40) | (((uint64_t)ahc_inb(ahc, port+6)) << 48) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index c89aab5e0ef8..758f76e88704 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -5812,7 +5812,6 @@ hba_free: pci_disable_msix(phba->pcidev); pci_dev_put(phba->pcidev); iscsi_host_free(phba->shost); - pci_disable_pcie_error_reporting(pcidev); pci_set_drvdata(pcidev, NULL); disable_pci: pci_release_regions(pcidev); diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig index 2eb2476852b1..d401a096dfc7 100644 --- a/drivers/scsi/bnx2fc/Kconfig +++ b/drivers/scsi/bnx2fc/Kconfig @@ -4,7 +4,6 @@ config SCSI_BNX2X_FCOE depends on (IPV6 || IPV6=n) depends on LIBFC depends on LIBFCOE - depends on MMU select NETDEVICES select ETHERNET select NET_VENDOR_BROADCOM diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 54400efc2079..573aeec7a02b 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -79,7 +79,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba); static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, struct device *parent, int npiv); -static void bnx2fc_port_destroy(struct fcoe_port *port); +static void bnx2fc_destroy_work(struct work_struct *work); static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device @@ -492,8 +492,7 @@ static int bnx2fc_l2_rcv_thread(void *arg) static void bnx2fc_recv_frame(struct sk_buff *skb) { - u64 crc_err; - u32 fr_len, fr_crc; + u32 fr_len; struct fc_lport *lport; struct fcoe_rcv_info *fr; struct fc_stats *stats; @@ -525,11 +524,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) skb_pull(skb, sizeof(struct fcoe_hdr)); fr_len 
= skb->len - sizeof(struct fcoe_crc_eof); - stats = per_cpu_ptr(lport->stats, get_cpu()); - stats->RxFrames++; - stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; - put_cpu(); - fp = (struct fc_frame *)skb; fc_frame_init(fp); fr_dev(fp) = lport; @@ -581,15 +575,16 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) return; } - fr_crc = le32_to_cpu(fr_crc(fp)); + stats = per_cpu_ptr(lport->stats, smp_processor_id()); + stats->RxFrames++; + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; - if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) { - stats = per_cpu_ptr(lport->stats, get_cpu()); - crc_err = (stats->InvalidCRCCount++); - put_cpu(); - if (crc_err < 5) + if (le32_to_cpu(fr_crc(fp)) != + ~crc32(~0, skb->data, fr_len)) { + if (stats->InvalidCRCCount < 5) printk(KERN_WARNING PFX "dropping frame with " "CRC error\n"); + stats->InvalidCRCCount++; kfree_skb(skb); return; } @@ -860,6 +855,9 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event, __bnx2fc_destroy(interface); } mutex_unlock(&bnx2fc_dev_lock); + + /* Ensure ALL destroy work has been completed before return */ + flush_workqueue(bnx2fc_wq); return; default: @@ -1150,8 +1148,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport) mutex_unlock(&n_port->lp_mutex); bnx2fc_free_vport(interface->hba, port->lport); bnx2fc_port_shutdown(port->lport); - bnx2fc_port_destroy(port); bnx2fc_interface_put(interface); + queue_work(bnx2fc_wq, &port->destroy_work); return 0; } @@ -1459,6 +1457,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, port->lport = lport; port->priv = interface; port->get_netdev = bnx2fc_netdev; + INIT_WORK(&port->destroy_work, bnx2fc_destroy_work); /* Configure fcoe_port */ rc = bnx2fc_lport_config(lport); @@ -1583,8 +1582,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface) bnx2fc_interface_cleanup(interface); bnx2fc_stop(interface); list_del(&interface->list); - bnx2fc_port_destroy(port); bnx2fc_interface_put(interface); 
+ queue_work(bnx2fc_wq, &port->destroy_work); } /** @@ -1625,12 +1624,15 @@ netdev_err: return rc; } -static void bnx2fc_port_destroy(struct fcoe_port *port) +static void bnx2fc_destroy_work(struct work_struct *work) { + struct fcoe_port *port; struct fc_lport *lport; + port = container_of(work, struct fcoe_port, destroy_work); lport = port->lport; - BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport); + + BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n"); bnx2fc_if_destroy(lport); } @@ -2467,6 +2469,9 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev) __bnx2fc_destroy(interface); mutex_unlock(&bnx2fc_dev_lock); + /* Ensure ALL destroy work has been completed before return */ + flush_workqueue(bnx2fc_wq); + bnx2fc_ulp_stop(hba); /* unregister cnic device */ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig index b27a3738d940..ba30ff86d581 100644 --- a/drivers/scsi/bnx2i/Kconfig +++ b/drivers/scsi/bnx2i/Kconfig @@ -3,7 +3,6 @@ config SCSI_BNX2_ISCSI depends on NET depends on PCI depends on (IPV6 || IPV6=n) - depends on MMU select SCSI_ISCSI_ATTRS select NETDEVICES select ETHERNET diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index d1df694d9ed0..957767d38361 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -611,7 +611,7 @@ csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp) struct fc_els_csp *csp; struct fc_els_cssp *clsp; enum fw_retval retval; - __be32 nport_id = 0; + __be32 nport_id; retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16)); if (retval != FW_SUCCESS) { diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 8490d0ff04ca..830b2d2dcf20 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -4809,7 +4809,6 @@ static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id) /* initialise the adapter and everything 
we need */ if (adapter_init(acb, io_port_base, io_port_len, irq)) { dprintkl(KERN_INFO, "adapter init failed\n"); - acb = NULL; goto fail; } diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 389c13e1c978..58ce9020d69c 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -735,7 +735,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < FNIC_IO_LOCKS; i++) spin_lock_init(&fnic->io_req_lock[i]); - err = -ENOMEM; fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); if (!fnic->io_req_pool) goto err_out_free_resources; diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h index 8039c809cef2..3fd8b83ffbf9 100644 --- a/drivers/scsi/gdth.h +++ b/drivers/scsi/gdth.h @@ -177,6 +177,9 @@ #define MSG_SIZE 34 /* size of message structure */ #define MSG_REQUEST 0 /* async. event: message */ +/* cacheservice defines */ +#define SECTOR_SIZE 0x200 /* always 512 bytes per sec. */ + /* DPMEM constants */ #define DPMEM_MAGIC 0xC0FFEE11 #define IC_HEADER_BYTES 48 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 636548030046..82ac1cd818ac 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -259,11 +259,12 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, device_enable_async_suspend(&shost->shost_dev); - get_device(&shost->shost_gendev); error = device_add(&shost->shost_dev); if (error) goto out_del_gendev; + get_device(&shost->shost_gendev); + if (shost->transportt->host_size) { shost->shost_data = kzalloc(shost->transportt->host_size, GFP_KERNEL); @@ -299,11 +300,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, out_del_dev: device_del(&shost->shost_dev); out_del_gendev: - /* - * Host state is SHOST_RUNNING so we have to explicitly release - * ->shost_dev. 
- */ - put_device(&shost->shost_dev); device_del(&shost->shost_gendev); out_destroy_freelist: scsi_destroy_command_freelist(shost); @@ -359,7 +355,7 @@ static void scsi_host_dev_release(struct device *dev) kfree(shost->shost_data); - if (shost->shost_state != SHOST_CREATED) + if (parent) put_device(parent); kfree(shost); } diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index aa74f72e582a..db80ab8335df 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -2883,10 +2883,8 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev) unsigned long flags = 0; spin_lock_irqsave(shost->host_lock, flags); - if (sdev->type == TYPE_DISK) { + if (sdev->type == TYPE_DISK) sdev->allow_restart = 1; - blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); - } spin_unlock_irqrestore(shost->host_lock, flags); return 0; } diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c index 65f0dbfc3a45..9aaa74e349cc 100644 --- a/drivers/scsi/jazz_esp.c +++ b/drivers/scsi/jazz_esp.c @@ -170,9 +170,7 @@ static int esp_jazz_probe(struct platform_device *dev) if (!esp->command_block) goto fail_unmap_regs; - host->irq = err = platform_get_irq(dev, 0); - if (err < 0) - goto fail_unmap_command_block; + host->irq = platform_get_irq(dev, 0); err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); if (err < 0) goto fail_unmap_command_block; diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index a088f74a157c..b20c575564e4 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1577,13 +1577,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) rc = fc_exch_done_locked(ep); WARN_ON(fc_seq_exch(sp) != ep); spin_unlock_bh(&ep->ex_lock); - if (!rc) { + if (!rc) fc_exch_delete(ep); - } else { - FC_EXCH_DBG(ep, "ep is completed already," - "hence skip calling the resp\n"); - goto skip_resp; - } } /* @@ -1602,7 +1597,6 @@ static void 
fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) if (!fc_invoke_resp(ep, sp, fp)) fc_frame_free(fp); -skip_resp: fc_exch_release(ep); return; rel: @@ -1847,16 +1841,10 @@ static void fc_exch_reset(struct fc_exch *ep) fc_exch_hold(ep); - if (!rc) { + if (!rc) fc_exch_delete(ep); - } else { - FC_EXCH_DBG(ep, "ep is completed already," - "hence skip calling the resp\n"); - goto skip_resp; - } fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED)); -skip_resp: fc_seq_set_resp(sp, NULL, ep->arg); fc_exch_release(ep); } diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 815d224b9ff8..867fc036d6ef 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1754,7 +1754,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) { FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " - "lport->mfs:%u\n", mfs, lport->mfs); + "lport->mfs:%hu\n", mfs, lport->mfs); fc_lport_error(lport, fp); goto out; } diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index b1ef1aa4dd44..36e415487fe5 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1384,6 +1384,7 @@ void iscsi_session_failure(struct iscsi_session *session, enum iscsi_err err) { struct iscsi_conn *conn; + struct device *dev; spin_lock_bh(&session->frwd_lock); conn = session->leadconn; @@ -1392,8 +1393,10 @@ void iscsi_session_failure(struct iscsi_session *session, return; } - iscsi_get_conn(conn->cls_conn); + dev = get_device(&conn->cls_conn->dev); spin_unlock_bh(&session->frwd_lock); + if (!dev) + return; /* * if the host is being removed bypass the connection * recovery initialization because we are going to kill @@ -1403,7 +1406,7 @@ void iscsi_session_failure(struct iscsi_session *session, iscsi_conn_error_event(conn->cls_conn, err); else iscsi_conn_failure(conn, err); - iscsi_put_conn(conn->cls_conn); + put_device(dev); } 
EXPORT_SYMBOL_GPL(iscsi_session_failure); @@ -1565,9 +1568,14 @@ check_mgmt: } rc = iscsi_prep_scsi_cmd_pdu(conn->task); if (rc) { - if (rc == -ENOMEM || rc == -EACCES) - fail_scsi_task(conn->task, DID_IMM_RETRY); - else + if (rc == -ENOMEM || rc == -EACCES) { + spin_lock_bh(&conn->taskqueuelock); + list_add_tail(&conn->task->running, + &conn->cmdqueue); + conn->task = NULL; + spin_unlock_bh(&conn->taskqueuelock); + goto done; + } else fail_scsi_task(conn->task, DID_ABORT); spin_lock_bh(&conn->taskqueuelock); continue; @@ -2994,8 +3002,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; - char *tmp_persistent_address = conn->persistent_address; - char *tmp_local_ipaddr = conn->local_ipaddr; del_timer_sync(&conn->transport_timer); @@ -3017,6 +3023,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) spin_lock_bh(&session->frwd_lock); free_pages((unsigned long) conn->data, get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); + kfree(conn->persistent_address); + kfree(conn->local_ipaddr); /* regular RX path uses back_lock */ spin_lock_bh(&session->back_lock); kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task, @@ -3028,8 +3036,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) mutex_unlock(&session->eh_mutex); iscsi_destroy_conn(cls_conn); - kfree(tmp_persistent_address); - kfree(tmp_local_ipaddr); } EXPORT_SYMBOL_GPL(iscsi_conn_teardown); @@ -3365,125 +3371,125 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, switch(param) { case ISCSI_PARAM_FAST_ABORT: - len = sysfs_emit(buf, "%d\n", session->fast_abort); + len = sprintf(buf, "%d\n", session->fast_abort); break; case ISCSI_PARAM_ABORT_TMO: - len = sysfs_emit(buf, "%d\n", session->abort_timeout); + len = sprintf(buf, "%d\n", session->abort_timeout); break; case ISCSI_PARAM_LU_RESET_TMO: - len = sysfs_emit(buf, "%d\n", session->lu_reset_timeout); + len = sprintf(buf, "%d\n", 
session->lu_reset_timeout); break; case ISCSI_PARAM_TGT_RESET_TMO: - len = sysfs_emit(buf, "%d\n", session->tgt_reset_timeout); + len = sprintf(buf, "%d\n", session->tgt_reset_timeout); break; case ISCSI_PARAM_INITIAL_R2T_EN: - len = sysfs_emit(buf, "%d\n", session->initial_r2t_en); + len = sprintf(buf, "%d\n", session->initial_r2t_en); break; case ISCSI_PARAM_MAX_R2T: - len = sysfs_emit(buf, "%hu\n", session->max_r2t); + len = sprintf(buf, "%hu\n", session->max_r2t); break; case ISCSI_PARAM_IMM_DATA_EN: - len = sysfs_emit(buf, "%d\n", session->imm_data_en); + len = sprintf(buf, "%d\n", session->imm_data_en); break; case ISCSI_PARAM_FIRST_BURST: - len = sysfs_emit(buf, "%u\n", session->first_burst); + len = sprintf(buf, "%u\n", session->first_burst); break; case ISCSI_PARAM_MAX_BURST: - len = sysfs_emit(buf, "%u\n", session->max_burst); + len = sprintf(buf, "%u\n", session->max_burst); break; case ISCSI_PARAM_PDU_INORDER_EN: - len = sysfs_emit(buf, "%d\n", session->pdu_inorder_en); + len = sprintf(buf, "%d\n", session->pdu_inorder_en); break; case ISCSI_PARAM_DATASEQ_INORDER_EN: - len = sysfs_emit(buf, "%d\n", session->dataseq_inorder_en); + len = sprintf(buf, "%d\n", session->dataseq_inorder_en); break; case ISCSI_PARAM_DEF_TASKMGMT_TMO: - len = sysfs_emit(buf, "%d\n", session->def_taskmgmt_tmo); + len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo); break; case ISCSI_PARAM_ERL: - len = sysfs_emit(buf, "%d\n", session->erl); + len = sprintf(buf, "%d\n", session->erl); break; case ISCSI_PARAM_TARGET_NAME: - len = sysfs_emit(buf, "%s\n", session->targetname); + len = sprintf(buf, "%s\n", session->targetname); break; case ISCSI_PARAM_TARGET_ALIAS: - len = sysfs_emit(buf, "%s\n", session->targetalias); + len = sprintf(buf, "%s\n", session->targetalias); break; case ISCSI_PARAM_TPGT: - len = sysfs_emit(buf, "%d\n", session->tpgt); + len = sprintf(buf, "%d\n", session->tpgt); break; case ISCSI_PARAM_USERNAME: - len = sysfs_emit(buf, "%s\n", session->username); + len = 
sprintf(buf, "%s\n", session->username); break; case ISCSI_PARAM_USERNAME_IN: - len = sysfs_emit(buf, "%s\n", session->username_in); + len = sprintf(buf, "%s\n", session->username_in); break; case ISCSI_PARAM_PASSWORD: - len = sysfs_emit(buf, "%s\n", session->password); + len = sprintf(buf, "%s\n", session->password); break; case ISCSI_PARAM_PASSWORD_IN: - len = sysfs_emit(buf, "%s\n", session->password_in); + len = sprintf(buf, "%s\n", session->password_in); break; case ISCSI_PARAM_IFACE_NAME: - len = sysfs_emit(buf, "%s\n", session->ifacename); + len = sprintf(buf, "%s\n", session->ifacename); break; case ISCSI_PARAM_INITIATOR_NAME: - len = sysfs_emit(buf, "%s\n", session->initiatorname); + len = sprintf(buf, "%s\n", session->initiatorname); break; case ISCSI_PARAM_BOOT_ROOT: - len = sysfs_emit(buf, "%s\n", session->boot_root); + len = sprintf(buf, "%s\n", session->boot_root); break; case ISCSI_PARAM_BOOT_NIC: - len = sysfs_emit(buf, "%s\n", session->boot_nic); + len = sprintf(buf, "%s\n", session->boot_nic); break; case ISCSI_PARAM_BOOT_TARGET: - len = sysfs_emit(buf, "%s\n", session->boot_target); + len = sprintf(buf, "%s\n", session->boot_target); break; case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: - len = sysfs_emit(buf, "%u\n", session->auto_snd_tgt_disable); + len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable); break; case ISCSI_PARAM_DISCOVERY_SESS: - len = sysfs_emit(buf, "%u\n", session->discovery_sess); + len = sprintf(buf, "%u\n", session->discovery_sess); break; case ISCSI_PARAM_PORTAL_TYPE: - len = sysfs_emit(buf, "%s\n", session->portal_type); + len = sprintf(buf, "%s\n", session->portal_type); break; case ISCSI_PARAM_CHAP_AUTH_EN: - len = sysfs_emit(buf, "%u\n", session->chap_auth_en); + len = sprintf(buf, "%u\n", session->chap_auth_en); break; case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: - len = sysfs_emit(buf, "%u\n", session->discovery_logout_en); + len = sprintf(buf, "%u\n", session->discovery_logout_en); break; case ISCSI_PARAM_BIDI_CHAP_EN: - len = 
sysfs_emit(buf, "%u\n", session->bidi_chap_en); + len = sprintf(buf, "%u\n", session->bidi_chap_en); break; case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: - len = sysfs_emit(buf, "%u\n", session->discovery_auth_optional); + len = sprintf(buf, "%u\n", session->discovery_auth_optional); break; case ISCSI_PARAM_DEF_TIME2WAIT: - len = sysfs_emit(buf, "%d\n", session->time2wait); + len = sprintf(buf, "%d\n", session->time2wait); break; case ISCSI_PARAM_DEF_TIME2RETAIN: - len = sysfs_emit(buf, "%d\n", session->time2retain); + len = sprintf(buf, "%d\n", session->time2retain); break; case ISCSI_PARAM_TSID: - len = sysfs_emit(buf, "%u\n", session->tsid); + len = sprintf(buf, "%u\n", session->tsid); break; case ISCSI_PARAM_ISID: - len = sysfs_emit(buf, "%02x%02x%02x%02x%02x%02x\n", + len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", session->isid[0], session->isid[1], session->isid[2], session->isid[3], session->isid[4], session->isid[5]); break; case ISCSI_PARAM_DISCOVERY_PARENT_IDX: - len = sysfs_emit(buf, "%u\n", session->discovery_parent_idx); + len = sprintf(buf, "%u\n", session->discovery_parent_idx); break; case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: if (session->discovery_parent_type) - len = sysfs_emit(buf, "%s\n", + len = sprintf(buf, "%s\n", session->discovery_parent_type); else - len = sysfs_emit(buf, "\n"); + len = sprintf(buf, "\n"); break; default: return -ENOSYS; @@ -3515,16 +3521,16 @@ int iscsi_conn_get_addr_param(struct sockaddr_storage *addr, case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_HOST_PARAM_IPADDRESS: if (sin) - len = sysfs_emit(buf, "%pI4\n", &sin->sin_addr.s_addr); + len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr); else - len = sysfs_emit(buf, "%pI6\n", &sin6->sin6_addr); + len = sprintf(buf, "%pI6\n", &sin6->sin6_addr); break; case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_LOCAL_PORT: if (sin) - len = sysfs_emit(buf, "%hu\n", be16_to_cpu(sin->sin_port)); + len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port)); else - len = sysfs_emit(buf, "%hu\n", + len 
= sprintf(buf, "%hu\n", be16_to_cpu(sin6->sin6_port)); break; default: @@ -3543,88 +3549,88 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, switch(param) { case ISCSI_PARAM_PING_TMO: - len = sysfs_emit(buf, "%u\n", conn->ping_timeout); + len = sprintf(buf, "%u\n", conn->ping_timeout); break; case ISCSI_PARAM_RECV_TMO: - len = sysfs_emit(buf, "%u\n", conn->recv_timeout); + len = sprintf(buf, "%u\n", conn->recv_timeout); break; case ISCSI_PARAM_MAX_RECV_DLENGTH: - len = sysfs_emit(buf, "%u\n", conn->max_recv_dlength); + len = sprintf(buf, "%u\n", conn->max_recv_dlength); break; case ISCSI_PARAM_MAX_XMIT_DLENGTH: - len = sysfs_emit(buf, "%u\n", conn->max_xmit_dlength); + len = sprintf(buf, "%u\n", conn->max_xmit_dlength); break; case ISCSI_PARAM_HDRDGST_EN: - len = sysfs_emit(buf, "%d\n", conn->hdrdgst_en); + len = sprintf(buf, "%d\n", conn->hdrdgst_en); break; case ISCSI_PARAM_DATADGST_EN: - len = sysfs_emit(buf, "%d\n", conn->datadgst_en); + len = sprintf(buf, "%d\n", conn->datadgst_en); break; case ISCSI_PARAM_IFMARKER_EN: - len = sysfs_emit(buf, "%d\n", conn->ifmarker_en); + len = sprintf(buf, "%d\n", conn->ifmarker_en); break; case ISCSI_PARAM_OFMARKER_EN: - len = sysfs_emit(buf, "%d\n", conn->ofmarker_en); + len = sprintf(buf, "%d\n", conn->ofmarker_en); break; case ISCSI_PARAM_EXP_STATSN: - len = sysfs_emit(buf, "%u\n", conn->exp_statsn); + len = sprintf(buf, "%u\n", conn->exp_statsn); break; case ISCSI_PARAM_PERSISTENT_PORT: - len = sysfs_emit(buf, "%d\n", conn->persistent_port); + len = sprintf(buf, "%d\n", conn->persistent_port); break; case ISCSI_PARAM_PERSISTENT_ADDRESS: - len = sysfs_emit(buf, "%s\n", conn->persistent_address); + len = sprintf(buf, "%s\n", conn->persistent_address); break; case ISCSI_PARAM_STATSN: - len = sysfs_emit(buf, "%u\n", conn->statsn); + len = sprintf(buf, "%u\n", conn->statsn); break; case ISCSI_PARAM_MAX_SEGMENT_SIZE: - len = sysfs_emit(buf, "%u\n", conn->max_segment_size); + len = sprintf(buf, "%u\n", 
conn->max_segment_size); break; case ISCSI_PARAM_KEEPALIVE_TMO: - len = sysfs_emit(buf, "%u\n", conn->keepalive_tmo); + len = sprintf(buf, "%u\n", conn->keepalive_tmo); break; case ISCSI_PARAM_LOCAL_PORT: - len = sysfs_emit(buf, "%u\n", conn->local_port); + len = sprintf(buf, "%u\n", conn->local_port); break; case ISCSI_PARAM_TCP_TIMESTAMP_STAT: - len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_stat); + len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat); break; case ISCSI_PARAM_TCP_NAGLE_DISABLE: - len = sysfs_emit(buf, "%u\n", conn->tcp_nagle_disable); + len = sprintf(buf, "%u\n", conn->tcp_nagle_disable); break; case ISCSI_PARAM_TCP_WSF_DISABLE: - len = sysfs_emit(buf, "%u\n", conn->tcp_wsf_disable); + len = sprintf(buf, "%u\n", conn->tcp_wsf_disable); break; case ISCSI_PARAM_TCP_TIMER_SCALE: - len = sysfs_emit(buf, "%u\n", conn->tcp_timer_scale); + len = sprintf(buf, "%u\n", conn->tcp_timer_scale); break; case ISCSI_PARAM_TCP_TIMESTAMP_EN: - len = sysfs_emit(buf, "%u\n", conn->tcp_timestamp_en); + len = sprintf(buf, "%u\n", conn->tcp_timestamp_en); break; case ISCSI_PARAM_IP_FRAGMENT_DISABLE: - len = sysfs_emit(buf, "%u\n", conn->fragment_disable); + len = sprintf(buf, "%u\n", conn->fragment_disable); break; case ISCSI_PARAM_IPV4_TOS: - len = sysfs_emit(buf, "%u\n", conn->ipv4_tos); + len = sprintf(buf, "%u\n", conn->ipv4_tos); break; case ISCSI_PARAM_IPV6_TC: - len = sysfs_emit(buf, "%u\n", conn->ipv6_traffic_class); + len = sprintf(buf, "%u\n", conn->ipv6_traffic_class); break; case ISCSI_PARAM_IPV6_FLOW_LABEL: - len = sysfs_emit(buf, "%u\n", conn->ipv6_flow_label); + len = sprintf(buf, "%u\n", conn->ipv6_flow_label); break; case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: - len = sysfs_emit(buf, "%u\n", conn->is_fw_assigned_ipv6); + len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6); break; case ISCSI_PARAM_TCP_XMIT_WSF: - len = sysfs_emit(buf, "%u\n", conn->tcp_xmit_wsf); + len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf); break; case ISCSI_PARAM_TCP_RECV_WSF: - 
len = sysfs_emit(buf, "%u\n", conn->tcp_recv_wsf); + len = sprintf(buf, "%u\n", conn->tcp_recv_wsf); break; case ISCSI_PARAM_LOCAL_IPADDR: - len = sysfs_emit(buf, "%s\n", conn->local_ipaddr); + len = sprintf(buf, "%s\n", conn->local_ipaddr); break; default: return -ENOSYS; @@ -3642,13 +3648,13 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: - len = sysfs_emit(buf, "%s\n", ihost->netdev); + len = sprintf(buf, "%s\n", ihost->netdev); break; case ISCSI_HOST_PARAM_HWADDRESS: - len = sysfs_emit(buf, "%s\n", ihost->hwaddress); + len = sprintf(buf, "%s\n", ihost->hwaddress); break; case ISCSI_HOST_PARAM_INITIATOR_NAME: - len = sysfs_emit(buf, "%s\n", ihost->initiatorname); + len = sprintf(buf, "%s\n", ihost->initiatorname); break; default: return -ENOSYS; diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index eba0d4ef546d..68b33abeaa5f 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -216,17 +216,18 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); task->total_xfer_len = qc->nbytes; task->num_scatter = qc->n_elem; - task->data_dir = qc->dma_dir; - } else if (qc->tf.protocol == ATA_PROT_NODATA) { - task->data_dir = DMA_NONE; } else { for_each_sg(qc->sg, sg, qc->n_elem, si) xfer += sg_dma_len(sg); task->total_xfer_len = xfer; task->num_scatter = si; - task->data_dir = qc->dma_dir; } + + if (qc->tf.protocol == ATA_PROT_NODATA) + task->data_dir = DMA_NONE; + else + task->data_dir = qc->dma_dir; task->scatter = qc->sg; task->ata_task.retry_count = 1; task->task_state_flags = SAS_TASK_STATE_PENDING; diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index 30e0730f613e..d3c5297c6c89 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c @@ -41,7 +41,7 @@ static bool phy_is_wideport_member(struct 
asd_sas_port *port, struct asd_sas_phy static void sas_resume_port(struct asd_sas_phy *phy) { - struct domain_device *dev, *n; + struct domain_device *dev; struct asd_sas_port *port = phy->port; struct sas_ha_struct *sas_ha = phy->ha; struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt); @@ -60,7 +60,7 @@ static void sas_resume_port(struct asd_sas_phy *phy) * 1/ presume every device came back * 2/ force the next revalidation to check all expander phys */ - list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) { + list_for_each_entry(dev, &port->dev_list, dev_list_node) { int i, rc; rc = sas_notify_lldd_dev_found(dev); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 034d09f8d341..25aa9b98d53a 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1061,7 +1061,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, memset(dstbuf, 0, 32); size = (nbytes < 32) ? nbytes : 32; if (copy_from_user(dstbuf, buf, size)) - return -EFAULT; + return 0; if (dent == phba->debug_InjErrLBA) { if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f')) @@ -1069,7 +1069,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, } if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp))) - return -EINVAL; + return 0; if (dent == phba->debug_writeGuard) phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b66b1ed6d2af..5be938b47f48 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1142,15 +1142,6 @@ stop_rr_fcf_flogi: phba->fcf.fcf_redisc_attempted = 0; /* reset */ goto out; } - } else if (vport->port_state > LPFC_FLOGI && - vport->fc_flag & FC_PT2PT) { - /* - * In a p2p topology, it is possible that discovery has - * already progressed, and this completion can be ignored. - * Recheck the indicated topology. 
- */ - if (!sp->cmn.fPort) - goto out; } flogifail: diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 1a44102b43c3..6aa0698925da 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1604,6 +1604,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_issue_els_logo(vport, ndlp, 0); + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); return ndlp->nlp_state; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 2087125922a1..97c0d79a2601 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -15049,6 +15049,7 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, if (cmd_iocbq) { ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; lpfc_nlp_put(ndlp); + lpfc_nlp_not_used(ndlp); lpfc_sli_release_iocbq(phba, cmd_iocbq); } @@ -17071,7 +17072,6 @@ lpfc_drain_txq(struct lpfc_hba *phba) fail_msg, piocbq->iotag, piocbq->sli4_xritag); list_add_tail(&piocbq->list, &completions); - fail_msg = NULL; } spin_unlock_irqrestore(&pring->ring_lock, iflags); } diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index 34067dfd2841..a70692779a16 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -246,7 +246,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) mimd_t mimd; uint32_t adapno; int iterator; - bool is_found; + if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) { *rval = -EFAULT; @@ -262,16 +262,12 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) adapter = NULL; iterator = 0; - is_found = false; list_for_each_entry(adapter, &adapters_list_g, list) { - if (iterator++ == adapno) { - is_found = true; - break; - } + if (iterator++ == adapno) break; } - if (!is_found) { + if (!adapter) { *rval = -ENODEV; return NULL; } @@ -739,7 +735,6 @@ ioctl_done(uioc_t 
*kioc) uint32_t adapno; int iterator; mraid_mmadp_t* adapter; - bool is_found; /* * When the kioc returns from driver, make sure it still doesn't @@ -762,23 +757,19 @@ ioctl_done(uioc_t *kioc) iterator = 0; adapter = NULL; adapno = kioc->adapno; - is_found = false; con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed " "ioctl that was timedout before\n")); list_for_each_entry(adapter, &adapters_list_g, list) { - if (iterator++ == adapno) { - is_found = true; - break; - } + if (iterator++ == adapno) break; } kioc->timedout = 0; - if (is_found) + if (adapter) { mraid_mm_dealloc_kioc( adapter, kioc ); - + } } else { wake_up(&wait_q); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 0e39bb1489ac..8735e4257028 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2904,7 +2904,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) shost_for_each_device(sdev, ioc->shost) { sas_device_priv_data = sdev->hostdata; - if (!sas_device_priv_data || !sas_device_priv_data->sas_target) + if (!sas_device_priv_data) continue; if (sas_device_priv_data->sas_target->sas_address != sas_address) @@ -5014,10 +5014,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) handle, parent_handle, (unsigned long long) sas_expander->sas_address, sas_expander->num_phys); - if (!sas_expander->num_phys) { - rc = -1; + if (!sas_expander->num_phys) goto out_fail; - } sas_expander->phy = kcalloc(sas_expander->num_phys, sizeof(struct _sas_phy), GFP_KERNEL); if (!sas_expander->phy) { diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index a982701bc3e0..062ab34b86f8 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -1063,8 +1063,7 @@ static int pm8001_pci_probe(struct pci_dev *pdev, pm8001_init_sas_add(pm8001_ha); /* phy setting support for motherboard controller */ - rc = pm8001_configure_phy_settings(pm8001_ha); 
- if (rc) + if (pm8001_configure_phy_settings(pm8001_ha)) goto err_out_shost; pm8001_post_sas_ha_init(shost, chip); diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index fb30329e60f0..7686bfe9a4a9 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -112,6 +112,7 @@ extern int ql2xasynctmfenable; extern int ql2xgffidenable; extern int ql2xenabledif; extern int ql2xenablehba_err_chk; +extern int ql2xtargetreset; extern int ql2xdontresethba; extern uint64_t ql2xmaxlun; extern int ql2xmdcapmask; @@ -642,6 +643,7 @@ extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *); extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *); extern void qlafx00_timer_routine(scsi_qla_host_t *); extern int qlafx00_rescan_isp(scsi_qla_host_t *); +extern int qlafx00_loop_reset(scsi_qla_host_t *vha); /* qla82xx related functions */ diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 4e75179e4368..b5029e543b91 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -737,6 +737,29 @@ qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag) return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); } +int +qlafx00_loop_reset(scsi_qla_host_t *vha) +{ + int ret; + struct fc_port *fcport; + struct qla_hw_data *ha = vha->hw; + + if (ql2xtargetreset) { + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->port_type != FCT_TARGET) + continue; + + ret = ha->isp_ops->target_reset(fcport, 0, 0); + if (ret != QLA_SUCCESS) { + ql_dbg(ql_dbg_taskm, vha, 0x803d, + "Bus Reset failed: Reset=%d " + "d_id=%x.\n", ret, fcport->d_id.b24); + } + } + } + return QLA_SUCCESS; +} + int qlafx00_iospace_config(struct qla_hw_data *ha) { diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 46f7e3988009..65f8d2d94159 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -1103,8 +1103,7 @@ 
qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, return ret; } - ret = qla82xx_flash_set_write_enable(ha); - if (ret < 0) + if (qla82xx_flash_set_write_enable(ha)) goto done_write; qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 4fa0cd3c3663..daafb60fa13e 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -177,6 +177,12 @@ MODULE_PARM_DESC(ql2xdbwr, " 0 -- Regular doorbell.\n" " 1 -- CAMRAM doorbell (faster).\n"); +int ql2xtargetreset = 1; +module_param(ql2xtargetreset, int, S_IRUGO); +MODULE_PARM_DESC(ql2xtargetreset, + "Enable target reset." + "Default is 1 - use hw defaults."); + int ql2xgffidenable; module_param(ql2xgffidenable, int, S_IRUGO); MODULE_PARM_DESC(ql2xgffidenable, @@ -1309,10 +1315,27 @@ int qla2x00_loop_reset(scsi_qla_host_t *vha) { int ret; + struct fc_port *fcport; struct qla_hw_data *ha = vha->hw; - if (IS_QLAFX00(ha)) - return QLA_SUCCESS; + if (IS_QLAFX00(ha)) { + return qlafx00_loop_reset(vha); + } + + if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) { + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->port_type != FCT_TARGET) + continue; + + ret = ha->isp_ops->target_reset(fcport, 0, 0); + if (ret != QLA_SUCCESS) { + ql_dbg(ql_dbg_taskm, vha, 0x802c, + "Bus Reset failed: Reset=%d " + "d_id=%x.\n", ret, fcport->d_id.b24); + } + } + } + if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { atomic_set(&vha->loop_state, LOOP_DOWN); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 0a8a5841e1b8..1d9f19e5e0f8 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1042,7 +1042,6 @@ void qlt_stop_phase2(struct qla_tgt *tgt) "Waiting for %d IRQ commands to complete (tgt %p)", tgt->irq_cmd_count, tgt); - mutex_lock(&tgt->ha->optrom_mutex); mutex_lock(&vha->vha_tgt.tgt_mutex); 
spin_lock_irqsave(&ha->hardware_lock, flags); while (tgt->irq_cmd_count != 0) { @@ -1054,7 +1053,6 @@ void qlt_stop_phase2(struct qla_tgt *tgt) tgt->tgt_stopped = 1; spin_unlock_irqrestore(&ha->hardware_lock, flags); mutex_unlock(&vha->vha_tgt.tgt_mutex); - mutex_unlock(&tgt->ha->optrom_mutex); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished", tgt); diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 7a6fafa8ba56..bca584ae45b7 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -112,6 +112,7 @@ (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \ QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0)) #endif +#endif #define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \ ? le16_to_cpu((iocb)->u.isp2x.target.extended) \ @@ -322,7 +323,6 @@ struct ctio_to_2xxx { #ifndef CTIO_RET_TYPE #define CTIO_RET_TYPE 0x17 /* CTIO return entry */ #define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */ -#endif struct fcp_hdr { uint8_t r_ctl; diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 74372aaf209d..c3e622524604 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -871,8 +871,7 @@ qla27xx_template_checksum(void *p, ulong size) static inline int qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp) { - return qla27xx_template_checksum(tmp, - le32_to_cpu(tmp->template_size)) == 0; + return qla27xx_template_checksum(tmp, tmp->template_size) == 0; } static inline int @@ -888,7 +887,7 @@ qla27xx_execute_fwdt_template(struct scsi_qla_host *vha) ulong len; if (qla27xx_fwdt_template_valid(tmp)) { - len = le32_to_cpu(tmp->template_size); + len = tmp->template_size; tmp = memcpy(vha->hw->fw_dump, tmp, len); ql27xx_edit_template(vha, tmp); qla27xx_walk_template(vha, tmp, tmp, &len); @@ -904,7 +903,7 @@ qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha) ulong len = 0; if 
(qla27xx_fwdt_template_valid(tmp)) { - len = le32_to_cpu(tmp->template_size); + len = tmp->template_size; qla27xx_walk_template(vha, tmp, NULL, &len); } @@ -916,7 +915,7 @@ qla27xx_fwdt_template_size(void *p) { struct qla27xx_fwdt_template *tmp = p; - return le32_to_cpu(tmp->template_size); + return tmp->template_size; } ulong diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h index 2d3e1a8349b3..141c1c5e73f4 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.h +++ b/drivers/scsi/qla2xxx/qla_tmpl.h @@ -13,7 +13,7 @@ struct __packed qla27xx_fwdt_template { uint32_t template_type; uint32_t entry_offset; - __le32 template_size; + uint32_t template_size; uint32_t reserved_1; uint32_t entry_count; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index d59ce94fdf73..d07fb653f5dc 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -936,10 +936,8 @@ EXPORT_SYMBOL(scsi_device_get); */ void scsi_device_put(struct scsi_device *sdev) { - struct module *mod = sdev->host->hostt->module; - + module_put(sdev->host->hostt->module); put_device(&sdev->sdev_gendev); - module_put(mod); } EXPORT_SYMBOL(scsi_device_put); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 455cdd2c3fed..269198b46adb 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1004,7 +1004,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) case 0x07: /* operation in progress */ case 0x08: /* Long write in progress */ case 0x09: /* self test in progress */ - case 0x11: /* notify (enable spinup) required */ case 0x14: /* space allocation in progress */ action = ACTION_DELAYED_RETRY; break; diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index fd8ebb9e66e6..271e8035f5fb 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -457,8 +457,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, error = shost->hostt->target_alloc(starget); if(error) { - if (error != -ENXIO) - 
dev_err(dev, "target allocation failed, error %d\n", error); + dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error); /* don't want scsi_target_reap to do the final * put because it will be under the host lock */ scsi_target_destroy(starget); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index b6db6c049f20..1290c542f6d6 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -396,12 +396,9 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) struct device *parent; struct list_head *this, *tmp; unsigned long flags; - struct module *mod; sdev = container_of(work, struct scsi_device, ew.work); - mod = sdev->host->hostt->module; - scsi_dh_release_device(sdev); parent = sdev->sdev_gendev.parent; @@ -433,17 +430,11 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) if (parent) put_device(parent); - module_put(mod); } static void scsi_device_dev_release(struct device *dev) { struct scsi_device *sdp = to_scsi_device(dev); - - /* Set module pointer as NULL in case of module unloading */ - if (!try_module_get(sdp->host->hostt->module)) - sdp->host->hostt->module = NULL; - execute_in_process_context(scsi_device_dev_release_usercontext, &sdp->ew); } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 269277c1d9dc..490364031648 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -119,11 +119,7 @@ show_transport_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_internal *priv = dev_to_iscsi_internal(dev); - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - return sysfs_emit(buf, "%llu\n", - (unsigned long long)iscsi_handle(priv->iscsi_transport)); + return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport)); } static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL); @@ -133,7 +129,7 @@ show_transport_##name(struct device 
*dev, \ struct device_attribute *attr,char *buf) \ { \ struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \ - return sysfs_emit(buf, format"\n", priv->iscsi_transport->name);\ + return sprintf(buf, format"\n", priv->iscsi_transport->name); \ } \ static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL); @@ -174,7 +170,7 @@ static ssize_t show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev); - return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id); + return sprintf(buf, "%llu\n", (unsigned long long) ep->id); } static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL); @@ -427,9 +423,40 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, struct device *dev = container_of(kobj, struct device, kobj); struct iscsi_iface *iface = iscsi_dev_to_iface(dev); struct iscsi_transport *t = iface->transport; - int param = -1; + int param; + int param_type; - if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) + if (attr == &dev_attr_iface_enabled.attr) + param = ISCSI_NET_PARAM_IFACE_ENABLE; + else if (attr == &dev_attr_iface_vlan_id.attr) + param = ISCSI_NET_PARAM_VLAN_ID; + else if (attr == &dev_attr_iface_vlan_priority.attr) + param = ISCSI_NET_PARAM_VLAN_PRIORITY; + else if (attr == &dev_attr_iface_vlan_enabled.attr) + param = ISCSI_NET_PARAM_VLAN_ENABLED; + else if (attr == &dev_attr_iface_mtu.attr) + param = ISCSI_NET_PARAM_MTU; + else if (attr == &dev_attr_iface_port.attr) + param = ISCSI_NET_PARAM_PORT; + else if (attr == &dev_attr_iface_ipaddress_state.attr) + param = ISCSI_NET_PARAM_IPADDR_STATE; + else if (attr == &dev_attr_iface_delayed_ack_en.attr) + param = ISCSI_NET_PARAM_DELAYED_ACK_EN; + else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) + param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; + else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) + param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; + else if (attr == &dev_attr_iface_tcp_wsf.attr) + param = 
ISCSI_NET_PARAM_TCP_WSF; + else if (attr == &dev_attr_iface_tcp_timer_scale.attr) + param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; + else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) + param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; + else if (attr == &dev_attr_iface_cache_id.attr) + param = ISCSI_NET_PARAM_CACHE_ID; + else if (attr == &dev_attr_iface_redirect_en.attr) + param = ISCSI_NET_PARAM_REDIRECT_EN; + else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; else if (attr == &dev_attr_iface_header_digest.attr) param = ISCSI_IFACE_PARAM_HDRDGST_EN; @@ -465,40 +492,6 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN; else if (attr == &dev_attr_iface_initiator_name.attr) param = ISCSI_IFACE_PARAM_INITIATOR_NAME; - - if (param != -1) - return t->attr_is_visible(ISCSI_IFACE_PARAM, param); - - if (attr == &dev_attr_iface_enabled.attr) - param = ISCSI_NET_PARAM_IFACE_ENABLE; - else if (attr == &dev_attr_iface_vlan_id.attr) - param = ISCSI_NET_PARAM_VLAN_ID; - else if (attr == &dev_attr_iface_vlan_priority.attr) - param = ISCSI_NET_PARAM_VLAN_PRIORITY; - else if (attr == &dev_attr_iface_vlan_enabled.attr) - param = ISCSI_NET_PARAM_VLAN_ENABLED; - else if (attr == &dev_attr_iface_mtu.attr) - param = ISCSI_NET_PARAM_MTU; - else if (attr == &dev_attr_iface_port.attr) - param = ISCSI_NET_PARAM_PORT; - else if (attr == &dev_attr_iface_ipaddress_state.attr) - param = ISCSI_NET_PARAM_IPADDR_STATE; - else if (attr == &dev_attr_iface_delayed_ack_en.attr) - param = ISCSI_NET_PARAM_DELAYED_ACK_EN; - else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) - param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; - else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) - param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; - else if (attr == &dev_attr_iface_tcp_wsf.attr) - param = ISCSI_NET_PARAM_TCP_WSF; - else if (attr == &dev_attr_iface_tcp_timer_scale.attr) - param = 
ISCSI_NET_PARAM_TCP_TIMER_SCALE; - else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) - param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; - else if (attr == &dev_attr_iface_cache_id.attr) - param = ISCSI_NET_PARAM_CACHE_ID; - else if (attr == &dev_attr_iface_redirect_en.attr) - param = ISCSI_NET_PARAM_REDIRECT_EN; else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { if (attr == &dev_attr_ipv4_iface_ipaddress.attr) param = ISCSI_NET_PARAM_IPV4_ADDR; @@ -589,7 +582,32 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, return 0; } - return t->attr_is_visible(ISCSI_NET_PARAM, param); + switch (param) { + case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: + case ISCSI_IFACE_PARAM_HDRDGST_EN: + case ISCSI_IFACE_PARAM_DATADGST_EN: + case ISCSI_IFACE_PARAM_IMM_DATA_EN: + case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: + case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: + case ISCSI_IFACE_PARAM_PDU_INORDER_EN: + case ISCSI_IFACE_PARAM_ERL: + case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: + case ISCSI_IFACE_PARAM_FIRST_BURST: + case ISCSI_IFACE_PARAM_MAX_R2T: + case ISCSI_IFACE_PARAM_MAX_BURST: + case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: + case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: + case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: + case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: + case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: + case ISCSI_IFACE_PARAM_INITIATOR_NAME: + param_type = ISCSI_IFACE_PARAM; + break; + default: + param_type = ISCSI_NET_PARAM; + } + + return t->attr_is_visible(param_type, param); } static struct attribute *iscsi_iface_attrs[] = { @@ -1896,12 +1914,12 @@ static void session_recovery_timedout(struct work_struct *work) } spin_unlock_irqrestore(&session->lock, flags); + if (session->transport->session_recovery_timedout) + session->transport->session_recovery_timedout(session); + ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n"); scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n"); - - if 
(session->transport->session_recovery_timedout) - session->transport->session_recovery_timedout(session); } static void __iscsi_unblock_session(struct work_struct *work) @@ -2306,18 +2324,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn) } EXPORT_SYMBOL_GPL(iscsi_destroy_conn); -void iscsi_put_conn(struct iscsi_cls_conn *conn) -{ - put_device(&conn->dev); -} -EXPORT_SYMBOL_GPL(iscsi_put_conn); - -void iscsi_get_conn(struct iscsi_cls_conn *conn) -{ - get_device(&conn->dev); -} -EXPORT_SYMBOL_GPL(iscsi_get_conn); - /* * iscsi interface functions */ @@ -2777,9 +2783,6 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) struct iscsi_cls_session *session; int err = 0, value = 0; - if (ev->u.set_param.len > PAGE_SIZE) - return -EINVAL; - session = iscsi_session_lookup(ev->u.set_param.sid); conn = iscsi_conn_lookup(ev->u.set_param.sid, ev->u.set_param.cid); if (!conn || !session) @@ -2927,9 +2930,6 @@ iscsi_set_host_param(struct iscsi_transport *transport, if (!transport->set_host_param) return -ENOSYS; - if (ev->u.set_host_param.len > PAGE_SIZE) - return -EINVAL; - shost = scsi_host_lookup(ev->u.set_host_param.host_no); if (!shost) { printk(KERN_ERR "set_host_param could not find host no %u\n", @@ -3516,7 +3516,6 @@ static int iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; - u32 pdu_len; struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; @@ -3524,9 +3523,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) struct iscsi_cls_conn *conn; struct iscsi_endpoint *ep = NULL; - if (!netlink_capable(skb, CAP_SYS_ADMIN)) - return -EPERM; - if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE) *group = ISCSI_NL_GRP_UIP; else @@ -3632,14 +3628,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) err = -EINVAL; break; case ISCSI_UEVENT_SEND_PDU: - pdu_len = nlh->nlmsg_len - sizeof(*nlh) - 
sizeof(*ev); - - if ((ev->u.send_pdu.hdr_size > pdu_len) || - (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { - err = -EINVAL; - break; - } - conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); if (conn) ev->r.retcode = transport->send_pdu(conn, @@ -4044,7 +4032,7 @@ show_priv_session_state(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); - return sysfs_emit(buf, "%s\n", iscsi_session_state_name(session->state)); + return sprintf(buf, "%s\n", iscsi_session_state_name(session->state)); } static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state, NULL); @@ -4053,7 +4041,7 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); - return sysfs_emit(buf, "%d\n", session->creator); + return sprintf(buf, "%d\n", session->creator); } static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator, NULL); @@ -4062,7 +4050,7 @@ show_priv_session_target_id(struct device *dev, struct device_attribute *attr, char *buf) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent); - return sysfs_emit(buf, "%d\n", session->target_id); + return sprintf(buf, "%d\n", session->target_id); } static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO, show_priv_session_target_id, NULL); @@ -4075,8 +4063,8 @@ show_priv_session_##field(struct device *dev, \ struct iscsi_cls_session *session = \ iscsi_dev_to_session(dev->parent); \ if (session->field == -1) \ - return sysfs_emit(buf, "off\n"); \ - return sysfs_emit(buf, format"\n", session->field); \ + return sprintf(buf, "off\n"); \ + return sprintf(buf, format"\n", session->field); \ } #define iscsi_priv_session_attr_store(field) \ diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 631570d4845d..71aa6a646a28 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ 
-3147,16 +3147,15 @@ static int sd_probe(struct device *dev) } device_initialize(&sdkp->dev); - sdkp->dev.parent = get_device(dev); + sdkp->dev.parent = dev; sdkp->dev.class = &sd_disk_class; dev_set_name(&sdkp->dev, "%s", dev_name(dev)); error = device_add(&sdkp->dev); - if (error) { - put_device(&sdkp->dev); - goto out; - } + if (error) + goto out_free_index; + get_device(dev); dev_set_drvdata(dev, sdkp); get_device(&sdkp->dev); /* prevent release before async_schedule */ diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 1aed965c33a3..01168acc864d 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -118,7 +118,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code, static int ses_send_diag(struct scsi_device *sdev, int page_code, void *buf, int bufflen) { - int result; + u32 result; unsigned char cmd[] = { SEND_DIAGNOSTIC, diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c index fad68cb028d6..b0f5220ae23a 100644 --- a/drivers/scsi/sni_53c710.c +++ b/drivers/scsi/sni_53c710.c @@ -71,7 +71,6 @@ static int snirm710_probe(struct platform_device *dev) struct NCR_700_Host_Parameters *hostdata; struct Scsi_Host *host; struct resource *res; - int rc; res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!res) @@ -97,9 +96,7 @@ static int snirm710_probe(struct platform_device *dev) goto out_kfree; host->this_id = 7; host->base = base; - host->irq = rc = platform_get_irq(dev, 0); - if (rc < 0) - goto out_put_host; + host->irq = platform_get_irq(dev, 0); if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) { printk(KERN_ERR "snirm710: request_irq failed!\n"); goto out_put_host; diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index e26d6cc3c871..7dd4d9ded249 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -216,8 +216,6 @@ static unsigned int sr_get_events(struct scsi_device *sdev) return DISK_EVENT_EJECT_REQUEST; else if (med->media_event_code == 2) return DISK_EVENT_MEDIA_CHANGE; - else if 
(med->media_event_code == 3) - return DISK_EVENT_MEDIA_CHANGE; return 0; } @@ -883,7 +881,7 @@ static void get_capabilities(struct scsi_cd *cd) /* allocate transfer buffer */ - buffer = kmalloc(512, GFP_KERNEL); + buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); if (!buffer) { sr_printk(KERN_ERR, cd, "out of memory.\n"); return; diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c index 629bfe1b2026..11a238cb2222 100644 --- a/drivers/scsi/sr_vendor.c +++ b/drivers/scsi/sr_vendor.c @@ -118,7 +118,7 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength) density = (blocklength > 2048) ? 0x81 : 0x83; #endif - buffer = kmalloc(512, GFP_KERNEL); + buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); if (!buffer) return -ENOMEM; @@ -166,7 +166,7 @@ int sr_cd_check(struct cdrom_device_info *cdi) if (cd->cdi.mask & CDC_MULTI_SESSION) return 0; - buffer = kmalloc(512, GFP_KERNEL); + buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); if (!buffer) return -ENOMEM; diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 3a3876091a9d..088a68ab4246 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -1267,8 +1267,8 @@ static int st_open(struct inode *inode, struct file *filp) spin_lock(&st_use_lock); if (STp->in_use) { spin_unlock(&st_use_lock); - DEBC_printk(STp, "Device already in use.\n"); scsi_tape_put(STp); + DEBC_printk(STp, "Device already in use.\n"); return (-EBUSY); } diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c index 167ae2d29e47..d50c5ed8f428 100644 --- a/drivers/scsi/sun3x_esp.c +++ b/drivers/scsi/sun3x_esp.c @@ -233,9 +233,7 @@ static int esp_sun3x_probe(struct platform_device *dev) if (!esp->command_block) goto fail_unmap_regs_dma; - host->irq = err = platform_get_irq(dev, 0); - if (err < 0) - goto fail_unmap_command_block; + host->irq = platform_get_irq(dev, 0); err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "SUN3X ESP", esp); if (err < 0) diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 
c18b00a13032..2798628020c7 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1526,6 +1526,7 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba) if (!host->disable_lpm) { hba->caps |= UFSHCD_CAP_CLK_GATING; hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; + hba->caps |= UFSHCD_CAP_CLK_SCALING; } hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 43ab440a6abc..be2793950686 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3116,6 +3116,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) err = ufshcd_map_sg(lrbp); if (err) { + ufshcd_release(hba, false); lrbp->cmd = NULL; clear_bit_unlock(tag, &hba->lrb_in_use); ufshcd_release_all(hba); @@ -6751,17 +6752,20 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) { struct Scsi_Host *host; struct ufs_hba *hba; + unsigned int tag; u32 pos; int err; - u8 resp = 0xF, lun; + u8 resp = 0xF; + struct ufshcd_lrb *lrbp; unsigned long flags; host = cmd->device->host; hba = shost_priv(host); + tag = cmd->request->tag; ufshcd_print_cmd_log(hba); - lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); - err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp); + lrbp = &hba->lrb[tag]; + err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp); if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { if (!err) err = resp; @@ -6770,7 +6774,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) /* clear the commands that were pending for corresponding LUN */ for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { - if (hba->lrb[pos].lun == lun) { + if (hba->lrb[pos].lun == lrbp->lun) { err = ufshcd_clear_cmd(hba, pos); if (err) break; diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 58e3f6db9928..9237427728ce 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -342,7 +342,7 @@ static void 
virtscsi_handle_transport_reset(struct virtio_scsi *vscsi, } break; default: - pr_info("Unsupported virtio scsi event reason %x\n", event->reason); + pr_info("Unsupport virtio scsi event reason %x\n", event->reason); } } @@ -395,7 +395,7 @@ static void virtscsi_handle_event(struct work_struct *work) virtscsi_handle_param_change(vscsi, event); break; default: - pr_err("Unsupported virtio scsi event %x\n", event->event); + pr_err("Unsupport virtio scsi event %x\n", event->event); } virtscsi_kick_event(vscsi, event_node); } diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index b0fd017abebd..3f2a5d6c437b 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -558,16 +558,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, case BTSTAT_SUCCESS: case BTSTAT_LINKED_COMMAND_COMPLETED: case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG: - /* - * Commands like INQUIRY may transfer less data than - * requested by the initiator via bufflen. Set residual - * count to make upper layer aware of the actual amount - * of data returned. There are cases when controller - * returns zero dataLen with non zero data - do not set - * residual count in that case. - */ - if (e->dataLen && (e->dataLen < scsi_bufflen(cmd))) - scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen); + /* If everything went fine, let's move on.. 
*/ cmd->result = (DID_OK << 16); break; diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index 1682fa3671bc..bec81c2404f7 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c @@ -835,10 +835,8 @@ static int __init maple_bus_init(void) maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN); - if (!maple_queue_cache) { - retval = -ENOMEM; + if (!maple_queue_cache) goto cleanup_bothirqs; - } INIT_LIST_HEAD(&maple_waitq); INIT_LIST_HEAD(&maple_sentq); @@ -851,7 +849,6 @@ static int __init maple_bus_init(void) if (!mdev[i]) { while (i-- > 0) maple_free_dev(mdev[i]); - retval = -ENOMEM; goto cleanup_cache; } baseunits[i] = mdev[i]; diff --git a/drivers/soc/qcom/msm_minidump.c b/drivers/soc/qcom/msm_minidump.c index 4d06d6cf9a94..300233085161 100644 --- a/drivers/soc/qcom/msm_minidump.c +++ b/drivers/soc/qcom/msm_minidump.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2017,2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -189,7 +189,6 @@ static int md_update_smem_table(const struct md_region *entry) int msm_minidump_add_region(const struct md_region *entry) { u32 entries; - u32 toc_init; struct md_region *mdr; int ret = 0; @@ -215,16 +214,6 @@ int msm_minidump_add_region(const struct md_region *entry) return -ENOMEM; } - toc_init = 0; - if (minidump_enabled) { - toc_init = 1; - if (region_idx >= MAX_NUM_ENTRIES) { - spin_unlock(&mdt_lock); - pr_err("Maximum regions in minidump table reached.\n"); - return -ENOMEM; - } - } - mdr = &minidump_table.entry[entries]; strlcpy(mdr->name, entry->name, sizeof(mdr->name)); mdr->virt_addr = entry->virt_addr; @@ -234,7 +223,7 @@ int msm_minidump_add_region(const struct md_region *entry) minidump_table.num_regions = entries + 1; - if (toc_init) + if (minidump_enabled) ret = md_update_smem_table(entry); else pendings++; diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index 9397e8ba2646..c4f5e5bbb8dc 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -176,7 +176,7 @@ static struct platform_driver tegra_fuse_driver = { }; module_platform_driver(tegra_fuse_driver); -u32 __init tegra_fuse_read_spare(unsigned int spare) +bool __init tegra_fuse_read_spare(unsigned int spare) { unsigned int offset = fuse->soc->info->spare + spare * 4; diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h index f368bd537308..10c2076d5089 100644 --- a/drivers/soc/tegra/fuse/fuse.h +++ b/drivers/soc/tegra/fuse/fuse.h @@ -62,7 +62,7 @@ struct tegra_fuse { void tegra_init_revision(void); void tegra_init_apbmisc(void); -u32 __init tegra_fuse_read_spare(unsigned int spare); +bool __init tegra_fuse_read_spare(unsigned int spare); u32 __init tegra_fuse_read_early(unsigned int offset); #ifdef CONFIG_ARCH_TEGRA_2x_SOC diff --git 
a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c index 4403b89561fd..5373f4c16b54 100644 --- a/drivers/soc/tegra/fuse/speedo-tegra210.c +++ b/drivers/soc/tegra/fuse/speedo-tegra210.c @@ -105,7 +105,7 @@ static int get_process_id(int value, const u32 *speedos, unsigned int num) unsigned int i; for (i = 0; i < num; i++) - if (value < speedos[i]) + if (value < speedos[num]) return i; return -EINVAL; diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c index 838b87adf48b..bc1b80ec6afe 100644 --- a/drivers/soc/ti/knav_dma.c +++ b/drivers/soc/ti/knav_dma.c @@ -752,9 +752,8 @@ static int knav_dma_probe(struct platform_device *pdev) pm_runtime_enable(kdev->dev); ret = pm_runtime_get_sync(kdev->dev); if (ret < 0) { - pm_runtime_put_noidle(kdev->dev); dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret); - goto err_pm_disable; + return ret; } /* Initialise all packet dmas */ @@ -768,21 +767,13 @@ static int knav_dma_probe(struct platform_device *pdev) if (list_empty(&kdev->list)) { dev_err(dev, "no valid dma instance\n"); - ret = -ENODEV; - goto err_put_sync; + return -ENODEV; } debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL, &knav_dma_debug_ops); return ret; - -err_put_sync: - pm_runtime_put_sync(kdev->dev); -err_pm_disable: - pm_runtime_disable(kdev->dev); - - return ret; } static int knav_dma_remove(struct platform_device *pdev) diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index 1aff6659655e..8c03a80b482d 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -1717,7 +1717,6 @@ static int knav_queue_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { - pm_runtime_put_noidle(&pdev->dev); dev_err(dev, "Failed to enable QMSS\n"); return ret; } @@ -1785,10 +1784,9 @@ static int knav_queue_probe(struct platform_device *pdev) if (ret) goto err; - regions = 
of_get_child_by_name(node, "descriptor-regions"); + regions = of_get_child_by_name(node, "descriptor-regions"); if (!regions) { dev_err(dev, "descriptor-regions not specified\n"); - ret = -ENODEV; goto err; } ret = knav_queue_setup_regions(kdev, regions); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index e88269115682..1f4a1f02a2cd 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -718,7 +718,4 @@ if SPI_SLAVE endif # SPI_SLAVE -config SPI_DYNAMIC - def_bool ACPI || OF_DYNAMIC || SPI_SLAVE - endif # SPI diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index dfbcaaaee66f..27680b336454 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -742,7 +742,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev) struct resource *res; int err; - master = devm_spi_alloc_master(&pdev->dev, sizeof(*bs)); + master = spi_alloc_master(&pdev->dev, sizeof(*bs)); if (!master) { dev_err(&pdev->dev, "spi_alloc_master() failed\n"); return -ENOMEM; @@ -764,20 +764,23 @@ static int bcm2835_spi_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); bs->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(bs->regs)) - return PTR_ERR(bs->regs); + if (IS_ERR(bs->regs)) { + err = PTR_ERR(bs->regs); + goto out_master_put; + } bs->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(bs->clk)) { err = PTR_ERR(bs->clk); dev_err(&pdev->dev, "could not get clk: %d\n", err); - return err; + goto out_master_put; } bs->irq = platform_get_irq(pdev, 0); if (bs->irq <= 0) { dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq); - return bs->irq ? bs->irq : -ENODEV; + err = bs->irq ? 
bs->irq : -ENODEV; + goto out_master_put; } clk_prepare_enable(bs->clk); @@ -792,20 +795,21 @@ static int bcm2835_spi_probe(struct platform_device *pdev) dev_name(&pdev->dev), master); if (err) { dev_err(&pdev->dev, "could not request IRQ: %d\n", err); - goto out_dma_release; + goto out_clk_disable; } err = spi_register_master(master); if (err) { dev_err(&pdev->dev, "could not register SPI master: %d\n", err); - goto out_dma_release; + goto out_clk_disable; } return 0; -out_dma_release: - bcm2835_dma_release(master); +out_clk_disable: clk_disable_unprepare(bs->clk); +out_master_put: + spi_master_put(master); return err; } diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c index 0b5aff090b2e..5ffc2765a8dd 100644 --- a/drivers/spi/spi-bcm2835aux.c +++ b/drivers/spi/spi-bcm2835aux.c @@ -381,7 +381,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) unsigned long clk_hz; int err; - master = devm_spi_alloc_master(&pdev->dev, sizeof(*bs)); + master = spi_alloc_master(&pdev->dev, sizeof(*bs)); if (!master) { dev_err(&pdev->dev, "spi_alloc_master() failed\n"); return -ENOMEM; @@ -411,27 +411,30 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) /* the main area */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); bs->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(bs->regs)) - return PTR_ERR(bs->regs); + if (IS_ERR(bs->regs)) { + err = PTR_ERR(bs->regs); + goto out_master_put; + } bs->clk = devm_clk_get(&pdev->dev, NULL); if ((!bs->clk) || (IS_ERR(bs->clk))) { err = PTR_ERR(bs->clk); dev_err(&pdev->dev, "could not get clk: %d\n", err); - return err; + goto out_master_put; } bs->irq = platform_get_irq(pdev, 0); if (bs->irq <= 0) { dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq); - return bs->irq ? bs->irq : -ENODEV; + err = bs->irq ? 
bs->irq : -ENODEV; + goto out_master_put; } /* this also enables the HW block */ err = clk_prepare_enable(bs->clk); if (err) { dev_err(&pdev->dev, "could not prepare clock: %d\n", err); - return err; + goto out_master_put; } /* just checking if the clock returns a sane value */ @@ -464,6 +467,8 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) out_clk_disable: clk_disable_unprepare(bs->clk); +out_master_put: + spi_master_put(master); return err; } diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 05b5df04f3b8..5a6749881ff9 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -116,7 +116,6 @@ struct cdns_spi { void __iomem *regs; struct clk *ref_clk; struct clk *pclk; - unsigned int clk_rate; u32 speed_hz; const u8 *txbuf; u8 *rxbuf; @@ -258,7 +257,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi, u32 ctrl_reg, baud_rate_val; unsigned long frequency; - frequency = xspi->clk_rate; + frequency = clk_get_rate(xspi->ref_clk); ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET); @@ -558,9 +557,8 @@ static int cdns_spi_probe(struct platform_device *pdev) master->set_cs = cdns_spi_chipselect; master->mode_bits = SPI_CPOL | SPI_CPHA; - xspi->clk_rate = clk_get_rate(xspi->ref_clk); /* Set to default valid value */ - master->max_speed_hz = xspi->clk_rate / 4; + master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4; xspi->speed_hz = master->max_speed_hz; master->bits_per_word_mask = SPI_BPW_MASK(8); diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c index 64b64174ce2f..3b7d91d94fea 100644 --- a/drivers/spi/spi-dln2.c +++ b/drivers/spi/spi-dln2.c @@ -781,7 +781,7 @@ exit_free_master: static int dln2_spi_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); struct dln2_spi *dln2 = spi_master_get_devdata(master); pm_runtime_disable(&pdev->dev); diff --git 
a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index e58319e58ba4..c46c0738c734 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -773,10 +773,8 @@ static int img_spfi_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret) { - pm_runtime_put_noidle(dev); + if (ret) return ret; - } spfi_reset(spfi); pm_runtime_put(dev); diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c index 8e662e7a3518..2465259f6241 100644 --- a/drivers/spi/spi-meson-spifc.c +++ b/drivers/spi/spi-meson-spifc.c @@ -357,7 +357,6 @@ static int meson_spifc_probe(struct platform_device *pdev) return 0; out_clk: clk_disable_unprepare(spifc->clk); - pm_runtime_disable(spifc->dev); out_err: spi_master_put(master); return ret; diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 59b5f4f41975..7840067062a8 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -429,7 +429,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) else mdata->state = MTK_SPI_IDLE; - if (!master->can_dma(master, NULL, trans)) { + if (!master->can_dma(master, master->cur_msg->spi, trans)) { if (trans->rx_buf) { if (mdata->xfer_len % 4) cnt = mdata->xfer_len / 4 + 1; diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c index 2eeb0fe2eed2..76a8425be227 100644 --- a/drivers/spi/spi-omap-100k.c +++ b/drivers/spi/spi-omap-100k.c @@ -251,7 +251,7 @@ static int omap1_spi100k_setup_transfer(struct spi_device *spi, else word_len = spi->bits_per_word; - if (word_len > 32) + if (spi->bits_per_word > 32) return -EINVAL; cs->word_len = word_len; @@ -435,7 +435,7 @@ err: static int omap1_spi100k_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); struct omap1_spi100k *spi100k = spi_master_get_devdata(master); pm_runtime_disable(&pdev->dev); @@ -449,7 +449,7 @@ static int 
omap1_spi100k_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int omap1_spi100k_runtime_suspend(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); + struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); struct omap1_spi100k *spi100k = spi_master_get_devdata(master); clk_disable_unprepare(spi100k->ick); @@ -460,7 +460,7 @@ static int omap1_spi100k_runtime_suspend(struct device *dev) static int omap1_spi100k_runtime_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); + struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); struct omap1_spi100k *spi100k = spi_master_get_devdata(master); int ret; diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index e294f21db206..5e5fd77e2711 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -1710,13 +1710,12 @@ static int verify_controller_parameters(struct pl022 *pl022, return -EINVAL; } } else { - if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) { + if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) dev_err(&pl022->adev->dev, "Microwire half duplex mode requested," " but this is only available in the" " ST version of PL022\n"); - return -EINVAL; - } + return -EINVAL; } } return 0; diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index d696cdd961a9..cfcc5a9a5cc9 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1479,7 +1479,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) return -ENODEV; } - master = devm_spi_alloc_master(dev, sizeof(*drv_data)); + master = spi_alloc_master(dev, sizeof(struct driver_data)); if (!master) { dev_err(&pdev->dev, "cannot alloc spi_master\n"); pxa_ssp_free(ssp); @@ -1619,6 +1619,7 @@ out_error_clock_enabled: free_irq(ssp->irq, drv_data); out_error_master_alloc: + spi_master_put(master); pxa_ssp_free(ssp); return status; } diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c index 
1d7fd6dbaf87..3641d0e20135 100644 --- a/drivers/spi/spi-rb4xx.c +++ b/drivers/spi/spi-rb4xx.c @@ -148,7 +148,7 @@ static int rb4xx_spi_probe(struct platform_device *pdev) if (IS_ERR(spi_base)) return PTR_ERR(spi_base); - master = devm_spi_alloc_master(&pdev->dev, sizeof(*rbspi)); + master = spi_alloc_master(&pdev->dev, sizeof(*rbspi)); if (!master) return -ENOMEM; diff --git a/drivers/spi/spi-s3c24xx-fiq.S b/drivers/spi/spi-s3c24xx-fiq.S index 1565c792da07..059f2dc1fda2 100644 --- a/drivers/spi/spi-s3c24xx-fiq.S +++ b/drivers/spi/spi-s3c24xx-fiq.S @@ -36,6 +36,7 @@ @ and an offset to the irq acknowledgment word ENTRY(s3c24xx_spi_fiq_rx) +s3c24xx_spi_fix_rx: .word fiq_rx_end - fiq_rx_start .word fiq_rx_irq_ack - fiq_rx_start fiq_rx_start: @@ -49,7 +50,7 @@ fiq_rx_start: strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] subs fiq_rcount, fiq_rcount, #1 - subsne pc, lr, #4 @@ return, still have work to do + subnes pc, lr, #4 @@ return, still have work to do @@ set IRQ controller so that next op will trigger IRQ mov fiq_rtmp, #0 @@ -61,6 +62,7 @@ fiq_rx_irq_ack: fiq_rx_end: ENTRY(s3c24xx_spi_fiq_txrx) +s3c24xx_spi_fiq_txrx: .word fiq_txrx_end - fiq_txrx_start .word fiq_txrx_irq_ack - fiq_txrx_start fiq_txrx_start: @@ -75,7 +77,7 @@ fiq_txrx_start: strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] subs fiq_rcount, fiq_rcount, #1 - subsne pc, lr, #4 @@ return, still have work to do + subnes pc, lr, #4 @@ return, still have work to do mov fiq_rtmp, #0 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] @@ -87,6 +89,7 @@ fiq_txrx_irq_ack: fiq_txrx_end: ENTRY(s3c24xx_spi_fiq_tx) +s3c24xx_spi_fix_tx: .word fiq_tx_end - fiq_tx_start .word fiq_tx_irq_ack - fiq_tx_start fiq_tx_start: @@ -99,7 +102,7 @@ fiq_tx_start: strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ] subs fiq_rcount, fiq_rcount, #1 - subsne pc, lr, #4 @@ return, still have work to do + subnes pc, lr, #4 @@ return, still have work to do mov fiq_rtmp, #0 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ] 
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c index f062ebb46e0e..502501187c9e 100644 --- a/drivers/spi/spi-sh.c +++ b/drivers/spi/spi-sh.c @@ -451,7 +451,7 @@ static int spi_sh_probe(struct platform_device *pdev) return -ENODEV; } - master = devm_spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data)); + master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data)); if (master == NULL) { dev_err(&pdev->dev, "spi_alloc_master error.\n"); return -ENOMEM; @@ -469,14 +469,16 @@ static int spi_sh_probe(struct platform_device *pdev) break; default: dev_err(&pdev->dev, "No support width\n"); - return -ENODEV; + ret = -ENODEV; + goto error1; } ss->irq = irq; ss->master = master; ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (ss->addr == NULL) { dev_err(&pdev->dev, "ioremap error.\n"); - return -ENOMEM; + ret = -ENOMEM; + goto error1; } INIT_LIST_HEAD(&ss->queue); spin_lock_init(&ss->lock); @@ -486,7 +488,8 @@ static int spi_sh_probe(struct platform_device *pdev) dev_name(master->dev.parent)); if (ss->workqueue == NULL) { dev_err(&pdev->dev, "create workqueue error\n"); - return -EBUSY; + ret = -EBUSY; + goto error1; } ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss); @@ -513,6 +516,9 @@ static int spi_sh_probe(struct platform_device *pdev) free_irq(irq, ss); error2: destroy_workqueue(ss->workqueue); + error1: + spi_master_put(master); + return ret; } diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index 079bdc4e65ff..48888ab630c2 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -249,10 +249,6 @@ static int sun6i_spi_transfer_one(struct spi_master *master, } sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg); - /* Finally enable the bus - doing so before might raise SCK to HIGH */ - reg = sun6i_spi_read(sspi, SUN6I_GBL_CTL_REG); - reg |= SUN6I_GBL_CTL_BUS_ENABLE; - sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG, reg); /* Setup the transfer now... 
*/ if (sspi->tx_buf) @@ -336,7 +332,7 @@ static int sun6i_spi_runtime_resume(struct device *dev) } sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG, - SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP); + SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP); return 0; diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index d1ca8f619b82..e37712bed0b2 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -801,7 +801,6 @@ static int tegra_spi_setup(struct spi_device *spi) ret = pm_runtime_get_sync(tspi->dev); if (ret < 0) { - pm_runtime_put_noidle(tspi->dev); dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret); return ret; } @@ -1215,7 +1214,6 @@ static int tegra_spi_resume(struct device *dev) ret = pm_runtime_get_sync(dev); if (ret < 0) { - pm_runtime_put_noidle(dev); dev_err(dev, "pm runtime failed, e = %d\n", ret); return ret; } diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c index 4b9541e1726a..b6558bb6f9df 100644 --- a/drivers/spi/spi-tegra20-sflash.c +++ b/drivers/spi/spi-tegra20-sflash.c @@ -564,7 +564,6 @@ static int tegra_sflash_resume(struct device *dev) ret = pm_runtime_get_sync(dev); if (ret < 0) { - pm_runtime_put_noidle(dev); dev_err(dev, "pm runtime failed, e = %d\n", ret); return ret; } diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 88bfe7682a9e..cf2a329fd895 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -761,7 +761,6 @@ static int tegra_slink_setup(struct spi_device *spi) ret = pm_runtime_get_sync(tspi->dev); if (ret < 0) { - pm_runtime_put_noidle(tspi->dev); dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret); return ret; } @@ -1198,7 +1197,6 @@ static int tegra_slink_resume(struct device *dev) ret = pm_runtime_get_sync(dev); if (ret < 0) { - pm_runtime_put_noidle(dev); dev_err(dev, "pm runtime failed, e = %d\n", ret); return ret; } @@ -1210,7 +1208,7 @@ static int tegra_slink_resume(struct device 
*dev) } #endif -static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev) +static int tegra_slink_runtime_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct tegra_slink_data *tspi = spi_master_get_devdata(master); @@ -1222,7 +1220,7 @@ static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev) return 0; } -static int __maybe_unused tegra_slink_runtime_resume(struct device *dev) +static int tegra_slink_runtime_resume(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct tegra_slink_data *tspi = spi_master_get_devdata(master); diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 6e97f71a8cea..5044c6198332 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -159,7 +159,6 @@ static int ti_qspi_setup(struct spi_device *spi) ret = pm_runtime_get_sync(qspi->dev); if (ret < 0) { - pm_runtime_put_noidle(qspi->dev); dev_err(qspi->dev, "pm_runtime_get_sync() failed\n"); return ret; } diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 66c170e799fc..9f30a4ab2004 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -589,10 +589,8 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw) data->pkt_tx_buff = kzalloc(size, GFP_KERNEL); if (data->pkt_tx_buff != NULL) { data->pkt_rx_buff = kzalloc(size, GFP_KERNEL); - if (!data->pkt_rx_buff) { + if (!data->pkt_rx_buff) kfree(data->pkt_tx_buff); - data->pkt_tx_buff = NULL; - } } if (!data->pkt_rx_buff) { diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index e05f8317660a..2dc0e8b8ecde 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -418,12 +418,6 @@ static LIST_HEAD(spi_master_list); */ static DEFINE_MUTEX(board_lock); -/* - * Prevents addition of devices with same chip select and - * addition of devices below an unregistering controller. 
- */ -static DEFINE_MUTEX(spi_add_lock); - /** * spi_alloc_device - Allocate a new SPI device * @master: Controller to which device is connected @@ -502,6 +496,7 @@ static int spi_dev_check(struct device *dev, void *data) */ int spi_add_device(struct spi_device *spi) { + static DEFINE_MUTEX(spi_add_lock); struct spi_master *master = spi->master; struct device *dev = master->dev.parent; int status; @@ -530,13 +525,6 @@ int spi_add_device(struct spi_device *spi) goto done; } - /* Controller may unregister concurrently */ - if (IS_ENABLED(CONFIG_SPI_DYNAMIC) && - !device_is_registered(&master->dev)) { - status = -ENODEV; - goto done; - } - if (master->cs_gpios) spi->cs_gpio = master->cs_gpios[spi->chip_select]; @@ -1858,47 +1846,6 @@ struct spi_master *__spi_alloc_controller(struct device *dev, } EXPORT_SYMBOL_GPL(__spi_alloc_controller); -static void devm_spi_release_master(struct device *dev, void *master) -{ - spi_master_put(*(struct spi_master **)master); -} - -/** - * devm_spi_alloc_master - resource-managed spi_alloc_master() - * @dev: physical device of SPI master - * @size: how much zeroed driver-private data to allocate - * Context: can sleep - * - * Allocate an SPI master and automatically release a reference on it - * when @dev is unbound from its driver. Drivers are thus relieved from - * having to call spi_master_put(). - * - * The arguments to this function are identical to spi_alloc_master(). - * - * Return: the SPI master structure on success, else NULL. 
- */ -struct spi_master *devm_spi_alloc_master(struct device *dev, unsigned int size) -{ - struct spi_master **ptr, *master; - - ptr = devres_alloc(devm_spi_release_master, sizeof(*ptr), - GFP_KERNEL); - if (!ptr) - return NULL; - - master = spi_alloc_master(dev, size); - if (master) { - master->devm_allocated = true; - *ptr = master; - devres_add(dev, ptr); - } else { - devres_free(ptr); - } - - return master; -} -EXPORT_SYMBOL_GPL(devm_spi_alloc_master); - #ifdef CONFIG_OF static int of_spi_register_master(struct spi_master *master) { @@ -2099,10 +2046,6 @@ static int __unregister(struct device *dev, void *null) */ void spi_unregister_master(struct spi_master *master) { - /* Prevent addition of new devices, unregister existing ones */ - if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) - mutex_lock(&spi_add_lock); - device_for_each_child(&master->dev, NULL, __unregister); if (master->queued) { @@ -2114,16 +2057,7 @@ void spi_unregister_master(struct spi_master *master) list_del(&master->list); mutex_unlock(&board_lock); - device_del(&master->dev); - - /* Release the last reference on the master if its driver - * has not yet been converted to devm_spi_alloc_master(). 
- */ - if (!master->devm_allocated) - put_device(&master->dev); - - if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) - mutex_unlock(&spi_add_lock); + device_unregister(&master->dev); } EXPORT_SYMBOL_GPL(spi_unregister_master); diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c index 5e10514ef80c..2278e43614bd 100644 --- a/drivers/ssb/sdio.c +++ b/drivers/ssb/sdio.c @@ -411,6 +411,7 @@ static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer, sdio_claim_host(bus->host_sdio); if (unlikely(ssb_sdio_switch_core(bus, dev))) { error = -EIO; + memset((void *)buffer, 0xff, count); goto err_out; } offset |= bus->sdio_sbaddr & 0xffff; diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 5fa0e5eef718..9a0b73bc1379 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -749,9 +749,6 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer) void *vaddr; if (buffer->kmap_cnt) { - if (buffer->kmap_cnt == INT_MAX) - return ERR_PTR(-EOVERFLOW); - buffer->kmap_cnt++; return buffer->vaddr; } @@ -772,9 +769,6 @@ static void *ion_handle_kmap_get(struct ion_handle *handle) void *vaddr; if (handle->kmap_cnt) { - if (handle->kmap_cnt == INT_MAX) - return ERR_PTR(-EOVERFLOW); - handle->kmap_cnt++; return buffer->vaddr; } diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c index a9aa89cf8778..954ed2c5d807 100644 --- a/drivers/staging/android/vsoc.c +++ b/drivers/staging/android/vsoc.c @@ -269,8 +269,7 @@ static int do_create_fd_scoped_permission( atomic_t *owner_ptr = NULL; struct vsoc_device_region *managed_region_p; - if (copy_from_user(&np->permission, - &arg->perm, sizeof(np->permission)) || + if (copy_from_user(&np->permission, &arg->perm, sizeof(*np)) || copy_from_user(&managed_fd, &arg->managed_region_fd, sizeof(managed_fd))) { return -EFAULT; diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c index 
9058ef473c33..536a135cd00b 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1032.c +++ b/drivers/staging/comedi/drivers/addi_apci_1032.c @@ -269,7 +269,6 @@ static irqreturn_t apci1032_interrupt(int irq, void *d) struct apci1032_private *devpriv = dev->private; struct comedi_subdevice *s = dev->read_subdev; unsigned int ctrl; - unsigned short val; /* check interrupt is from this device */ if ((inl(devpriv->amcc_iobase + AMCC_OP_REG_INTCSR) & @@ -285,8 +284,7 @@ static irqreturn_t apci1032_interrupt(int irq, void *d) outl(ctrl & ~APCI1032_CTRL_INT_ENA, dev->iobase + APCI1032_CTRL_REG); s->state = inl(dev->iobase + APCI1032_STATUS_REG) & 0xffff; - val = s->state; - comedi_buf_write_samples(s, &val, 1); + comedi_buf_write_samples(s, &s->state, 1); comedi_handle_events(dev, s); /* enable the interrupt */ diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c index 1f25f565041c..c4e36fb6df9d 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1500.c +++ b/drivers/staging/comedi/drivers/addi_apci_1500.c @@ -217,7 +217,7 @@ static irqreturn_t apci1500_interrupt(int irq, void *d) struct comedi_device *dev = d; struct apci1500_private *devpriv = dev->private; struct comedi_subdevice *s = dev->read_subdev; - unsigned short status = 0; + unsigned int status = 0; unsigned int val; val = inl(devpriv->amcc + AMCC_OP_REG_INTCSR); @@ -247,14 +247,14 @@ static irqreturn_t apci1500_interrupt(int irq, void *d) * * Mask Meaning * ---------- ------------------------------------------ - * 0b00000001 Event 1 has occurred - * 0b00000010 Event 2 has occurred - * 0b00000100 Counter/timer 1 has run down (not implemented) - * 0b00001000 Counter/timer 2 has run down (not implemented) - * 0b00010000 Counter 3 has run down (not implemented) - * 0b00100000 Watchdog has run down (not implemented) - * 0b01000000 Voltage error - * 0b10000000 Short-circuit error + * 0x00000001 Event 1 has occurred + * 0x00000010 Event 2 has occurred + * 
0x00000100 Counter/timer 1 has run down (not implemented) + * 0x00001000 Counter/timer 2 has run down (not implemented) + * 0x00010000 Counter 3 has run down (not implemented) + * 0x00100000 Watchdog has run down (not implemented) + * 0x01000000 Voltage error + * 0x10000000 Short-circuit error */ comedi_buf_write_samples(s, &status, 1); comedi_handle_events(dev, s); diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c index 8599258f8c8d..399c511cfe0a 100644 --- a/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/drivers/staging/comedi/drivers/adv_pci1710.c @@ -351,11 +351,11 @@ static int pci171x_ai_eoc(struct comedi_device *dev, static int pci171x_ai_read_sample(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int cur_chan, - unsigned short *val) + unsigned int *val) { const struct boardtype *board = dev->board_ptr; struct pci1710_private *devpriv = dev->private; - unsigned short sample; + unsigned int sample; unsigned int chan; sample = inw(dev->iobase + PCI171X_AD_DATA_REG); @@ -395,7 +395,7 @@ static int pci171x_ai_insn_read(struct comedi_device *dev, pci171x_ai_setup_chanlist(dev, s, &insn->chanspec, 1, 1); for (i = 0; i < insn->n; i++) { - unsigned short val; + unsigned int val; /* start conversion */ outw(0, dev->iobase + PCI171X_SOFTTRG_REG); @@ -516,7 +516,7 @@ static void pci1710_handle_every_sample(struct comedi_device *dev, { struct comedi_cmd *cmd = &s->async->cmd; unsigned int status; - unsigned short val; + unsigned int val; int ret; status = inw(dev->iobase + PCI171X_STATUS_REG); @@ -576,7 +576,7 @@ static void pci1710_handle_fifo(struct comedi_device *dev, } for (i = 0; i < devpriv->max_samples; i++) { - unsigned short val; + unsigned int val; int ret; ret = pci171x_ai_read_sample(dev, s, s->async->cur_chan, &val); diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c index 15b9cc8531f0..3ea15bb0e56e 100644 --- 
a/drivers/staging/comedi/drivers/cb_pcidas.c +++ b/drivers/staging/comedi/drivers/cb_pcidas.c @@ -1290,7 +1290,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev, devpriv->amcc + AMCC_OP_REG_INTCSR); ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED, - "cb_pcidas", dev); + dev->board_name, dev); if (ret) { dev_dbg(dev->class_dev, "unable to allocate irq %d\n", pcidev->irq); diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c index 93d8c0b06d55..d33b8fe872a7 100644 --- a/drivers/staging/comedi/drivers/cb_pcidas64.c +++ b/drivers/staging/comedi/drivers/cb_pcidas64.c @@ -4040,7 +4040,7 @@ static int auto_attach(struct comedi_device *dev, init_stc_registers(dev); retval = request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED, - "cb_pcidas64", dev); + dev->board_name, dev); if (retval) { dev_dbg(dev->class_dev, "unable to allocate irq %u\n", pcidev->irq); diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c index 666d199983ba..1701294b79cd 100644 --- a/drivers/staging/comedi/drivers/das6402.c +++ b/drivers/staging/comedi/drivers/das6402.c @@ -193,7 +193,7 @@ static irqreturn_t das6402_interrupt(int irq, void *d) if (status & DAS6402_STATUS_FFULL) { async->events |= COMEDI_CB_OVERFLOW; } else if (status & DAS6402_STATUS_FFNE) { - unsigned short val; + unsigned int val; val = das6402_ai_read_sample(dev, s); comedi_buf_write_samples(s, &val, 1); diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c index 91a0c4ec7fdb..b02f12201cf7 100644 --- a/drivers/staging/comedi/drivers/das800.c +++ b/drivers/staging/comedi/drivers/das800.c @@ -436,7 +436,7 @@ static irqreturn_t das800_interrupt(int irq, void *d) struct comedi_cmd *cmd; unsigned long irq_flags; unsigned int status; - unsigned short val; + unsigned int val; bool fifo_empty; bool fifo_overflow; int i; diff --git a/drivers/staging/comedi/drivers/dmm32at.c 
b/drivers/staging/comedi/drivers/dmm32at.c index 48645ecde810..958c0d4aae5c 100644 --- a/drivers/staging/comedi/drivers/dmm32at.c +++ b/drivers/staging/comedi/drivers/dmm32at.c @@ -411,7 +411,7 @@ static irqreturn_t dmm32at_isr(int irq, void *d) { struct comedi_device *dev = d; unsigned char intstat; - unsigned short val; + unsigned int val; int i; if (!dev->attached) { diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c index e758eb3d2d19..3295bb4ac8c4 100644 --- a/drivers/staging/comedi/drivers/dt9812.c +++ b/drivers/staging/comedi/drivers/dt9812.c @@ -41,7 +41,6 @@ #include #include #include -#include #include #include "../comedi_usb.h" @@ -247,42 +246,22 @@ static int dt9812_read_info(struct comedi_device *dev, { struct usb_device *usb = comedi_to_usb_dev(dev); struct dt9812_private *devpriv = dev->private; - struct dt9812_usb_cmd *cmd; - size_t tbuf_size; + struct dt9812_usb_cmd cmd; int count, ret; - void *tbuf; - tbuf_size = max(sizeof(*cmd), buf_size); - - tbuf = kzalloc(tbuf_size, GFP_KERNEL); - if (!tbuf) - return -ENOMEM; - - cmd = tbuf; - - cmd->cmd = cpu_to_le32(DT9812_R_FLASH_DATA); - cmd->u.flash_data_info.address = + cmd.cmd = cpu_to_le32(DT9812_R_FLASH_DATA); + cmd.u.flash_data_info.address = cpu_to_le16(DT9812_DIAGS_BOARD_INFO_ADDR + offset); - cmd->u.flash_data_info.numbytes = cpu_to_le16(buf_size); + cmd.u.flash_data_info.numbytes = cpu_to_le16(buf_size); /* DT9812 only responds to 32 byte writes!! 
*/ ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), - cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT); + &cmd, 32, &count, DT9812_USB_TIMEOUT); if (ret) - goto out; - - ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr), - tbuf, buf_size, &count, DT9812_USB_TIMEOUT); - if (!ret) { - if (count == buf_size) - memcpy(buf, tbuf, buf_size); - else - ret = -EREMOTEIO; - } -out: - kfree(tbuf); + return ret; - return ret; + return usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr), + buf, buf_size, &count, DT9812_USB_TIMEOUT); } static int dt9812_read_multiple_registers(struct comedi_device *dev, @@ -291,42 +270,22 @@ static int dt9812_read_multiple_registers(struct comedi_device *dev, { struct usb_device *usb = comedi_to_usb_dev(dev); struct dt9812_private *devpriv = dev->private; - struct dt9812_usb_cmd *cmd; + struct dt9812_usb_cmd cmd; int i, count, ret; - size_t buf_size; - void *buf; - buf_size = max_t(size_t, sizeof(*cmd), reg_count); - - buf = kzalloc(buf_size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - cmd = buf; - - cmd->cmd = cpu_to_le32(DT9812_R_MULTI_BYTE_REG); - cmd->u.read_multi_info.count = reg_count; + cmd.cmd = cpu_to_le32(DT9812_R_MULTI_BYTE_REG); + cmd.u.read_multi_info.count = reg_count; for (i = 0; i < reg_count; i++) - cmd->u.read_multi_info.address[i] = address[i]; + cmd.u.read_multi_info.address[i] = address[i]; /* DT9812 only responds to 32 byte writes!! 
*/ ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), - cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT); + &cmd, 32, &count, DT9812_USB_TIMEOUT); if (ret) - goto out; - - ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr), - buf, reg_count, &count, DT9812_USB_TIMEOUT); - if (!ret) { - if (count == reg_count) - memcpy(value, buf, reg_count); - else - ret = -EREMOTEIO; - } -out: - kfree(buf); + return ret; - return ret; + return usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr), + value, reg_count, &count, DT9812_USB_TIMEOUT); } static int dt9812_write_multiple_registers(struct comedi_device *dev, @@ -335,27 +294,19 @@ static int dt9812_write_multiple_registers(struct comedi_device *dev, { struct usb_device *usb = comedi_to_usb_dev(dev); struct dt9812_private *devpriv = dev->private; - struct dt9812_usb_cmd *cmd; + struct dt9812_usb_cmd cmd; int i, count; - int ret; - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - cmd->cmd = cpu_to_le32(DT9812_W_MULTI_BYTE_REG); - cmd->u.read_multi_info.count = reg_count; + cmd.cmd = cpu_to_le32(DT9812_W_MULTI_BYTE_REG); + cmd.u.read_multi_info.count = reg_count; for (i = 0; i < reg_count; i++) { - cmd->u.write_multi_info.write[i].address = address[i]; - cmd->u.write_multi_info.write[i].value = value[i]; + cmd.u.write_multi_info.write[i].address = address[i]; + cmd.u.write_multi_info.write[i].value = value[i]; } /* DT9812 only responds to 32 byte writes!! 
*/ - ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), - cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT); - kfree(cmd); - - return ret; + return usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), + &cmd, 32, &count, DT9812_USB_TIMEOUT); } static int dt9812_rmw_multiple_registers(struct comedi_device *dev, @@ -364,25 +315,17 @@ static int dt9812_rmw_multiple_registers(struct comedi_device *dev, { struct usb_device *usb = comedi_to_usb_dev(dev); struct dt9812_private *devpriv = dev->private; - struct dt9812_usb_cmd *cmd; + struct dt9812_usb_cmd cmd; int i, count; - int ret; - - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - cmd->cmd = cpu_to_le32(DT9812_RMW_MULTI_BYTE_REG); - cmd->u.rmw_multi_info.count = reg_count; + cmd.cmd = cpu_to_le32(DT9812_RMW_MULTI_BYTE_REG); + cmd.u.rmw_multi_info.count = reg_count; for (i = 0; i < reg_count; i++) - cmd->u.rmw_multi_info.rmw[i] = rmw[i]; + cmd.u.rmw_multi_info.rmw[i] = rmw[i]; /* DT9812 only responds to 32 byte writes!! 
*/ - ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), - cmd, sizeof(*cmd), &count, DT9812_USB_TIMEOUT); - kfree(cmd); - - return ret; + return usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr), + &cmd, 32, &count, DT9812_USB_TIMEOUT); } static int dt9812_digital_in(struct comedi_device *dev, u8 *bits) @@ -717,12 +660,12 @@ static int dt9812_find_endpoints(struct comedi_device *dev) case 1: dir = USB_DIR_OUT; devpriv->cmd_wr.addr = ep->bEndpointAddress; - devpriv->cmd_wr.size = usb_endpoint_maxp(ep); + devpriv->cmd_wr.size = le16_to_cpu(ep->wMaxPacketSize); break; case 2: dir = USB_DIR_IN; devpriv->cmd_rd.addr = ep->bEndpointAddress; - devpriv->cmd_rd.size = usb_endpoint_maxp(ep); + devpriv->cmd_rd.size = le16_to_cpu(ep->wMaxPacketSize); break; case 3: /* unused write stream */ diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c index 4fe856128870..15a53204a36a 100644 --- a/drivers/staging/comedi/drivers/me4000.c +++ b/drivers/staging/comedi/drivers/me4000.c @@ -933,7 +933,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id) struct comedi_subdevice *s = dev->read_subdev; int i; int c = 0; - unsigned short lval; + unsigned int lval; if (!dev->attached) return IRQ_NONE; diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c index 40aa24a9b2c3..fbdf181d8ccc 100644 --- a/drivers/staging/comedi/drivers/mf6x4.c +++ b/drivers/staging/comedi/drivers/mf6x4.c @@ -121,9 +121,8 @@ static int mf6x4_ai_eoc(struct comedi_device *dev, struct mf6x4_private *devpriv = dev->private; unsigned int status; - /* EOLC goes low at end of conversion. 
*/ status = ioread32(devpriv->gpioc_reg); - if ((status & MF6X4_GPIOC_EOLC) == 0) + if (status & MF6X4_GPIOC_EOLC) return 0; return -EBUSY; } diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c index 7f647d80ec05..6778e2b73667 100644 --- a/drivers/staging/comedi/drivers/ni_usb6501.c +++ b/drivers/staging/comedi/drivers/ni_usb6501.c @@ -153,10 +153,6 @@ static const u8 READ_COUNTER_RESPONSE[] = {0x00, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00}; -/* Largest supported packets */ -static const size_t TX_MAX_SIZE = sizeof(SET_PORT_DIR_REQUEST); -static const size_t RX_MAX_SIZE = sizeof(READ_PORT_RESPONSE); - enum commands { READ_PORT, WRITE_PORT, @@ -469,12 +465,12 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev) struct ni6501_private *devpriv = dev->private; size_t size; - size = usb_endpoint_maxp(devpriv->ep_rx); + size = le16_to_cpu(devpriv->ep_rx->wMaxPacketSize); devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_rx_buf) return -ENOMEM; - size = usb_endpoint_maxp(devpriv->ep_tx); + size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize); devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_tx_buf) return -ENOMEM; @@ -514,12 +510,6 @@ static int ni6501_find_endpoints(struct comedi_device *dev) if (!devpriv->ep_rx || !devpriv->ep_tx) return -ENODEV; - if (usb_endpoint_maxp(devpriv->ep_rx) < RX_MAX_SIZE) - return -ENODEV; - - if (usb_endpoint_maxp(devpriv->ep_tx) < TX_MAX_SIZE) - return -ENODEV; - return 0; } diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c index e1334733abe7..3774daa9d661 100644 --- a/drivers/staging/comedi/drivers/pcl711.c +++ b/drivers/staging/comedi/drivers/pcl711.c @@ -193,7 +193,7 @@ static irqreturn_t pcl711_interrupt(int irq, void *d) struct comedi_device *dev = d; struct comedi_subdevice *s = dev->read_subdev; struct comedi_cmd *cmd = &s->async->cmd; - unsigned short data; + unsigned 
int data; if (!dev->attached) { dev_err(dev->class_dev, "spurious interrupt\n"); diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c index f06241712308..5aeed44dff70 100644 --- a/drivers/staging/comedi/drivers/pcl818.c +++ b/drivers/staging/comedi/drivers/pcl818.c @@ -422,7 +422,7 @@ static int pcl818_ai_eoc(struct comedi_device *dev, static bool pcl818_ai_write_sample(struct comedi_device *dev, struct comedi_subdevice *s, - unsigned int chan, unsigned short val) + unsigned int chan, unsigned int val) { struct pcl818_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c index 36470ee06596..51f9a7800edf 100644 --- a/drivers/staging/comedi/drivers/vmk80xx.c +++ b/drivers/staging/comedi/drivers/vmk80xx.c @@ -99,9 +99,6 @@ enum { #define IC3_VERSION BIT(0) #define IC6_VERSION BIT(1) -#define MIN_BUF_SIZE 64 -#define PACKET_TIMEOUT 10000 /* ms */ - enum vmk80xx_model { VMK8055_MODEL, VMK8061_MODEL @@ -169,21 +166,22 @@ static void vmk80xx_do_bulk_msg(struct comedi_device *dev) __u8 rx_addr; unsigned int tx_pipe; unsigned int rx_pipe; - size_t tx_size; - size_t rx_size; + size_t size; tx_addr = devpriv->ep_tx->bEndpointAddress; rx_addr = devpriv->ep_rx->bEndpointAddress; tx_pipe = usb_sndbulkpipe(usb, tx_addr); rx_pipe = usb_rcvbulkpipe(usb, rx_addr); - tx_size = usb_endpoint_maxp(devpriv->ep_tx); - rx_size = usb_endpoint_maxp(devpriv->ep_rx); - usb_bulk_msg(usb, tx_pipe, devpriv->usb_tx_buf, tx_size, NULL, - PACKET_TIMEOUT); + /* + * The max packet size attributes of the K8061 + * input/output endpoints are identical + */ + size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize); - usb_bulk_msg(usb, rx_pipe, devpriv->usb_rx_buf, rx_size, NULL, - PACKET_TIMEOUT); + usb_bulk_msg(usb, tx_pipe, devpriv->usb_tx_buf, + size, NULL, devpriv->ep_tx->bInterval); + usb_bulk_msg(usb, rx_pipe, devpriv->usb_rx_buf, size, NULL, HZ * 
10); } static int vmk80xx_read_packet(struct comedi_device *dev) @@ -201,8 +199,8 @@ static int vmk80xx_read_packet(struct comedi_device *dev) ep = devpriv->ep_rx; pipe = usb_rcvintpipe(usb, ep->bEndpointAddress); return usb_interrupt_msg(usb, pipe, devpriv->usb_rx_buf, - usb_endpoint_maxp(ep), NULL, - PACKET_TIMEOUT); + le16_to_cpu(ep->wMaxPacketSize), NULL, + HZ * 10); } static int vmk80xx_write_packet(struct comedi_device *dev, int cmd) @@ -222,8 +220,8 @@ static int vmk80xx_write_packet(struct comedi_device *dev, int cmd) ep = devpriv->ep_tx; pipe = usb_sndintpipe(usb, ep->bEndpointAddress); return usb_interrupt_msg(usb, pipe, devpriv->usb_tx_buf, - usb_endpoint_maxp(ep), NULL, - PACKET_TIMEOUT); + le16_to_cpu(ep->wMaxPacketSize), NULL, + HZ * 10); } static int vmk80xx_reset_device(struct comedi_device *dev) @@ -232,7 +230,7 @@ static int vmk80xx_reset_device(struct comedi_device *dev) size_t size; int retval; - size = usb_endpoint_maxp(devpriv->ep_tx); + size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize); memset(devpriv->usb_tx_buf, 0, size); retval = vmk80xx_write_packet(dev, VMK8055_CMD_RST); if (retval) @@ -689,12 +687,12 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev) struct vmk80xx_private *devpriv = dev->private; size_t size; - size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE); + size = le16_to_cpu(devpriv->ep_rx->wMaxPacketSize); devpriv->usb_rx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_rx_buf) return -ENOMEM; - size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE); + size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize); devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_tx_buf) return -ENOMEM; diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c index 102eee7d2e4f..91ff8fb0cc3a 100644 --- a/drivers/staging/emxx_udc/emxx_udc.c +++ b/drivers/staging/emxx_udc/emxx_udc.c @@ -2193,7 +2193,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, int 
status) { - struct nbu2ss_req *req, *n; + struct nbu2ss_req *req; /* Endpoint Disable */ _nbu2ss_epn_exit(udc, ep); @@ -2205,7 +2205,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc, return 0; /* called with irqs blocked */ - list_for_each_entry_safe(req, n, &ep->queue, queue) { + list_for_each_entry(req, &ep->queue, queue) { _nbu2ss_ep_done(ep, req, status); } diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h index ad499b04ed9f..3ccdec94fee7 100644 --- a/drivers/staging/fbtft/fbtft.h +++ b/drivers/staging/fbtft/fbtft.h @@ -350,10 +350,7 @@ static int __init fbtft_driver_module_init(void) \ ret = spi_register_driver(&fbtft_driver_spi_driver); \ if (ret < 0) \ return ret; \ - ret = platform_driver_register(&fbtft_driver_platform_driver); \ - if (ret < 0) \ - spi_unregister_driver(&fbtft_driver_spi_driver); \ - return ret; \ + return platform_driver_register(&fbtft_driver_platform_driver); \ } \ \ static void __exit fbtft_driver_module_exit(void) \ diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c index 68ed97398faf..b3ea4bb54e2c 100644 --- a/drivers/staging/fwserial/fwserial.c +++ b/drivers/staging/fwserial/fwserial.c @@ -2255,7 +2255,6 @@ static int fwserial_create(struct fw_unit *unit) err = fw_core_add_address_handler(&port->rx_handler, &fw_high_memory_region); if (err) { - tty_port_destroy(&port->port); kfree(port); goto free_ports; } @@ -2338,7 +2337,6 @@ unregister_ttys: free_ports: for (--i; i >= 0; --i) { - fw_core_remove_address_handler(&serial->ports[i]->rx_handler); tty_port_destroy(&serial->ports[i]->port); kfree(serial->ports[i]); } diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c index 8561f7fb53e9..79de678807cc 100644 --- a/drivers/staging/gdm724x/gdm_lte.c +++ b/drivers/staging/gdm724x/gdm_lte.c @@ -624,12 +624,10 @@ static void gdm_lte_netif_rx(struct net_device *dev, char *buf, * bytes (99,130,83,99 dec) */ } __packed; - int offset = sizeof(struct 
iphdr) + - sizeof(struct udphdr) + - offsetof(struct dhcp_packet, chaddr); - if (offset + ETH_ALEN > len) - return; - ether_addr_copy(nic->dest_mac_addr, buf + offset); + void *addr = buf + sizeof(struct iphdr) + + sizeof(struct udphdr) + + offsetof(struct dhcp_packet, chaddr); + ether_addr_copy(nic->dest_mac_addr, addr); } } @@ -691,7 +689,6 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len) struct multi_sdu *multi_sdu = (struct multi_sdu *)buf; struct sdu *sdu = NULL; u8 *data = (u8 *)multi_sdu->data; - int copied; u16 i = 0; u16 num_packet; u16 hci_len; @@ -705,12 +702,6 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len) multi_sdu->num_packet); for (i = 0; i < num_packet; i++) { - copied = data - multi_sdu->data; - if (len < copied + sizeof(*sdu)) { - pr_err("rx prevent buffer overflow"); - return; - } - sdu = (struct sdu *)data; cmd_evt = gdm_dev16_to_cpu(phy_dev-> @@ -724,8 +715,7 @@ static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len) pr_err("rx sdu wrong hci %04x\n", cmd_evt); return; } - if (hci_len < 12 || - len < copied + sizeof(*sdu) + (hci_len - 12)) { + if (hci_len < 12) { pr_err("rx sdu invalid len %d\n", hci_len); return; } diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c index 6604c0508290..2c5d27784ed3 100644 --- a/drivers/staging/iio/cdc/ad7746.c +++ b/drivers/staging/iio/cdc/ad7746.c @@ -714,6 +714,7 @@ static int ad7746_probe(struct i2c_client *client, indio_dev->num_channels = ARRAY_SIZE(ad7746_channels); else indio_dev->num_channels = ARRAY_SIZE(ad7746_channels) - 2; + indio_dev->num_channels = ARRAY_SIZE(ad7746_channels); indio_dev->modes = INDIO_DIRECT_MODE; if (pdata) { diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c index da2a2ff4cdb9..3100d960fe2c 100644 --- a/drivers/staging/iio/light/tsl2583.c +++ b/drivers/staging/iio/light/tsl2583.c @@ -378,15 +378,6 @@ static int 
taos_als_calibrate(struct iio_dev *indio_dev) dev_err(&chip->client->dev, "taos_als_calibrate failed to get lux\n"); return lux_val; } - - /* Avoid division by zero of lux_value later on */ - if (lux_val == 0) { - dev_err(&chip->client->dev, - "%s: lux_val of 0 will produce out of range trim_value\n", - __func__); - return -ENODATA; - } - gain_trim_val = (unsigned int) (((chip->taos_settings.als_cal_target) * chip->taos_settings.als_gain_trim) / lux_val); diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c index 130d09d28e1d..aa76ccda5b42 100644 --- a/drivers/staging/media/omap4iss/iss.c +++ b/drivers/staging/media/omap4iss/iss.c @@ -1406,10 +1406,8 @@ static int iss_probe(struct platform_device *pdev) if (ret < 0) goto error; - if (!omap4iss_get(iss)) { - ret = -EINVAL; + if (!omap4iss_get(iss)) goto error; - } ret = iss_reset(iss); if (ret < 0) diff --git a/drivers/staging/most/aim-sound/sound.c b/drivers/staging/most/aim-sound/sound.c index 532ec0f7100e..9c645801cff4 100644 --- a/drivers/staging/most/aim-sound/sound.c +++ b/drivers/staging/most/aim-sound/sound.c @@ -92,8 +92,6 @@ static void swap_copy24(u8 *dest, const u8 *source, unsigned int bytes) { unsigned int i = 0; - if (bytes < 2) - return; while (i < bytes - 2) { dest[i] = source[i + 2]; dest[i + 1] = source[i + 1]; diff --git a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_peer_ops.h b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_peer_ops.h index 311e5cee0f4a..ec5bbf0a7488 100644 --- a/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_peer_ops.h +++ b/drivers/staging/qca-wifi-host-cmn/dp/inc/cdp_txrx_peer_ops.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2018, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -170,16 +170,4 @@ void ol_txrx_remove_peers_for_vdev_no_lock(ol_txrx_vdev_handle vdev, ol_txrx_vdev_peer_remove_cb callback, void *callback_context); - -/** - * ol_txrx_peer_flush_frags() - Flush fragments for a particular peer - * @pdev - datapath pdev handle - * @vdev_id - virtual device id - * @peer_mac - peer mac address - * - * Return: None - */ -void -ol_txrx_peer_flush_frags(ol_txrx_pdev_handle pdev, uint8_t vdev_id, - uint8_t *peer_mac); #endif /* _CDP_TXRX_PEER_H_ */ diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h index 3c57cdc09cea..14611e10ec5a 100644 --- a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -138,10 +138,10 @@ wmi_unified_remove_work(struct wmi_unified *wmi_handle); #ifdef MEMORY_DEBUG #define wmi_buf_alloc(h, l) wmi_buf_alloc_debug(h, l, __FILE__, __LINE__) wmi_buf_t -wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len, +wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint16_t len, uint8_t *file_name, uint32_t line_num); #else -wmi_buf_t wmi_buf_alloc(wmi_unified_t wmi_handle, uint32_t len); +wmi_buf_t wmi_buf_alloc(wmi_unified_t wmi_handle, uint16_t len); #endif /** diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c index 2123435149e4..9aa0a8b0faad 100644 --- a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2018,2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -1201,8 +1201,8 @@ int wmi_get_host_credits(wmi_unified_t wmi_handle); #ifdef MEMORY_DEBUG wmi_buf_t -wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len, uint8_t *file_name, - uint32_t line_num) +wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint16_t len, uint8_t *file_name, + uint32_t line_num) { wmi_buf_t wmi_buf; @@ -1235,7 +1235,7 @@ void wmi_buf_free(wmi_buf_t net_buf) qdf_nbuf_free(net_buf); } #else -wmi_buf_t wmi_buf_alloc(wmi_unified_t wmi_handle, uint32_t len) +wmi_buf_t wmi_buf_alloc(wmi_unified_t wmi_handle, uint16_t len) { wmi_buf_t wmi_buf; diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_defrag.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_defrag.c index de94c0d676ab..84045eb8ad58 100644 --- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_defrag.c +++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_defrag.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2019, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -445,8 +445,6 @@ ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev, struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem; uint16_t frxseq, rxseq, seq; htt_pdev_handle htt_pdev = pdev->htt_pdev; - void *rx_desc; - uint8_t index; seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask; qdf_assert(seq == 0); @@ -460,28 +458,6 @@ ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev, IEEE80211_SEQ_FRAG_MASK; more_frag = mac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG; - rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag); - qdf_assert(htt_rx_msdu_has_wlan_mcast_flag(htt_pdev, rx_desc)); - index = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc) ? 
- txrx_sec_mcast : txrx_sec_ucast; - - /* - * Multicast/Broadcast frames should not be fragmented so drop - * such frames. - */ - if (index != txrx_sec_ucast) { - ol_rx_frames_free(htt_pdev, frag); - return; - } - - if (peer->security[index].sec_type != htt_sec_type_none && - !htt_rx_mpdu_is_encrypted(htt_pdev, rx_desc)) { - ol_txrx_err("Unencrypted fragment received in security mode %d", - peer->security[index].sec_type); - ol_rx_frames_free(htt_pdev, frag); - return; - } - if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) { ol_rx_fraglist_insert(htt_pdev, &rx_reorder_array_elem->head, &rx_reorder_array_elem->tail, frag, &all_frag_present); @@ -724,13 +700,7 @@ ol_rx_defrag(ol_txrx_pdev_handle pdev, while (cur) { tmp_next = qdf_nbuf_next(cur); qdf_nbuf_set_next(cur, NULL); - /* - * Strict PN check between the first fragment of the current - * frame and the last fragment of the previous frame is not - * necessary. - */ - if (!ol_rx_pn_check_base(vdev, peer, tid, cur, - (cur == frag_list) ? false : true)) { + if (!ol_rx_pn_check_base(vdev, peer, tid, cur)) { /* PN check failed,discard frags */ if (prev) { qdf_nbuf_set_next(prev, NULL); @@ -965,7 +935,7 @@ ol_rx_frag_tkip_demic(ol_txrx_pdev_handle pdev, const uint8_t *key, ol_rx_defrag_copydata(msdu, pktlen - f_tkip.ic_miclen + rx_desc_len, f_tkip.ic_miclen, (caddr_t) mic0); - if (qdf_mem_cmp(mic, mic0, f_tkip.ic_miclen)) + if (!qdf_mem_cmp(mic, mic0, f_tkip.ic_miclen)) return OL_RX_DEFRAG_ERR; qdf_nbuf_trim_tail(msdu, f_tkip.ic_miclen); diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_fwd.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_fwd.c index 7059303b9f50..ebc15b7206d9 100644 --- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_fwd.c +++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_fwd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2014-2018, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2011, 2014-2018 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -150,7 +150,6 @@ ol_rx_fwd_check(struct ol_txrx_vdev_t *vdev, while (msdu) { struct ol_txrx_vdev_t *tx_vdev; void *rx_desc; - uint16_t off = 0; /* * Remember the next list elem, because our processing * may cause the MSDU to get linked into a different list. @@ -205,26 +204,6 @@ ol_rx_fwd_check(struct ol_txrx_vdev_t *vdev, continue; } - if (pdev->cfg.is_high_latency) - off = htt_rx_msdu_rx_desc_size_hl( - pdev->htt_pdev, - rx_desc); - - if (vdev->opmode == wlan_op_mode_ap && - __qdf_nbuf_data_is_ipv4_eapol_pkt( - qdf_nbuf_data(msdu) + off) && - qdf_mem_cmp(qdf_nbuf_data(msdu) + - QDF_NBUF_DEST_MAC_OFFSET, - vdev->mac_addr.raw, - QDF_MAC_ADDR_SIZE)) { - TXRX_STATS_MSDU_LIST_INCR( - pdev, tx.dropped.host_reject, msdu); - qdf_nbuf_set_next(msdu, NULL); - qdf_nbuf_tx_free(msdu, QDF_NBUF_PKT_ERROR); - msdu = msdu_list; - continue; - } - /* * This MSDU needs to be forwarded to the tx path. * Check whether it also needs to be sent to the OS diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_pn.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_pn.c index 0bd8e75e5191..dd09c3f50c71 100644 --- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_pn.c +++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_pn.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2013-2017, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2011, 2013-2017 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -37,36 +37,25 @@ } while (0) int ol_rx_pn_cmp24(union htt_rx_pn_t *new_pn, - union htt_rx_pn_t *old_pn, int is_unicast, int opmode, - bool strict_chk) + union htt_rx_pn_t *old_pn, int is_unicast, int opmode) { - if (strict_chk) - return ((new_pn->pn24 & 0xffffff) - (old_pn->pn24 & 0xffffff) - != 1); - else - return ((new_pn->pn24 & 0xffffff) <= (old_pn->pn24 & 0xffffff)); + int rc = ((new_pn->pn24 & 0xffffff) <= (old_pn->pn24 & 0xffffff)); + return rc; } int ol_rx_pn_cmp48(union htt_rx_pn_t *new_pn, - union htt_rx_pn_t *old_pn, int is_unicast, int opmode, - bool strict_chk) + union htt_rx_pn_t *old_pn, int is_unicast, int opmode) { - if (strict_chk) - return ((new_pn->pn48 & 0xffffffffffffULL) - - (old_pn->pn48 & 0xffffffffffffULL) != 1); - else - return ((new_pn->pn48 & 0xffffffffffffULL) <= - (old_pn->pn48 & 0xffffffffffffULL)); + int rc = ((new_pn->pn48 & 0xffffffffffffULL) <= + (old_pn->pn48 & 0xffffffffffffULL)); + return rc; } int ol_rx_pn_wapi_cmp(union htt_rx_pn_t *new_pn, - union htt_rx_pn_t *old_pn, int is_unicast, int opmode, - bool strict_chk) + union htt_rx_pn_t *old_pn, int is_unicast, int opmode) { int pn_is_replay = 0; - /* TODO Strick check for WAPI is not implemented*/ - if (new_pn->pn128[1] == old_pn->pn128[1]) pn_is_replay = (new_pn->pn128[0] <= old_pn->pn128[0]); else @@ -84,7 +73,7 @@ int ol_rx_pn_wapi_cmp(union htt_rx_pn_t *new_pn, qdf_nbuf_t ol_rx_pn_check_base(struct ol_txrx_vdev_t *vdev, struct ol_txrx_peer_t *peer, - unsigned int tid, qdf_nbuf_t msdu_list, bool strict_chk) + unsigned int tid, qdf_nbuf_t msdu_list) { struct ol_txrx_pdev_t *pdev = vdev->pdev; union htt_rx_pn_t *last_pn; @@ -143,7 +132,7 @@ ol_rx_pn_check_base(struct ol_txrx_vdev_t *vdev, pn_is_replay = pdev->rx_pn[peer->security[index].sec_type]. 
cmp(&new_pn, last_pn, index == txrx_sec_ucast, - vdev->opmode, strict_chk); + vdev->opmode); } else { last_pn_valid = peer->tids_last_pn_valid[tid] = 1; } @@ -264,7 +253,7 @@ ol_rx_pn_check(struct ol_txrx_vdev_t *vdev, struct ol_txrx_peer_t *peer, unsigned int tid, qdf_nbuf_t msdu_list) { - msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list, false); + msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list); ol_rx_fwd_check(vdev, peer, tid, msdu_list); } @@ -273,7 +262,7 @@ ol_rx_pn_check_only(struct ol_txrx_vdev_t *vdev, struct ol_txrx_peer_t *peer, unsigned int tid, qdf_nbuf_t msdu_list) { - msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list, false); + msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list); ol_rx_deliver(vdev, peer, tid, msdu_list); } diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_pn.h b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_pn.h index fa64c3e5133c..8e0c007b091d 100644 --- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_pn.h +++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_pn.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2014-2017, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2011, 2014-2017 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -24,16 +24,13 @@ #include /* ol_txrx_peer_t, etc. 
*/ int ol_rx_pn_cmp24(union htt_rx_pn_t *new_pn, - union htt_rx_pn_t *old_pn, int is_unicast, int opmode, - bool strict_chk); + union htt_rx_pn_t *old_pn, int is_unicast, int opmode); int ol_rx_pn_cmp48(union htt_rx_pn_t *new_pn, - union htt_rx_pn_t *old_pn, int is_unicast, int opmode, - bool strict_chk); + union htt_rx_pn_t *old_pn, int is_unicast, int opmode); int ol_rx_pn_wapi_cmp(union htt_rx_pn_t *new_pn, - union htt_rx_pn_t *old_pn, int is_unicast, int opmode, - bool strict_chk); + union htt_rx_pn_t *old_pn, int is_unicast, int opmode); /** * @brief If applicable, check the Packet Number to detect replays. @@ -90,12 +87,11 @@ ol_rx_pn_check_only(struct ol_txrx_vdev_t *vdev, * @param tid - which TID within the peer the rx frames belong to * @param msdu_list - NULL-terminated list of MSDUs to perform PN check on * (if PN check is applicable, i.e. PN length > 0) - * @param strick_chk - if PN consecutive stric check is needed or not * @return list of netbufs that didn't fail the PN check */ qdf_nbuf_t ol_rx_pn_check_base(struct ol_txrx_vdev_t *vdev, struct ol_txrx_peer_t *peer, - unsigned int tid, qdf_nbuf_t msdu_list, bool strict_chk); + unsigned int tid, qdf_nbuf_t msdu_list); #endif /* _OL_RX_PN_H_ */ diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c index 131e666a6dfb..2248f2a5b20b 100644 --- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c +++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2019, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -1951,32 +1951,14 @@ ol_txrx_pdev_post_attach(ol_txrx_pdev_handle pdev) */ qdf_mem_set(&pdev->rx_pn[0], sizeof(pdev->rx_pn), 0); - /* WEP: 24-bit PN */ - pdev->rx_pn[htt_sec_type_wep40].len = - pdev->rx_pn[htt_sec_type_wep104].len = - pdev->rx_pn[htt_sec_type_wep128].len = 24; - - pdev->rx_pn[htt_sec_type_wep40].cmp = - pdev->rx_pn[htt_sec_type_wep104].cmp = - pdev->rx_pn[htt_sec_type_wep128].cmp = ol_rx_pn_cmp24; - /* TKIP: 48-bit TSC, CCMP: 48-bit PN */ pdev->rx_pn[htt_sec_type_tkip].len = pdev->rx_pn[htt_sec_type_tkip_nomic].len = pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48; - - pdev->rx_pn[htt_sec_type_aes_ccmp_256].len = - pdev->rx_pn[htt_sec_type_aes_gcmp].len = - pdev->rx_pn[htt_sec_type_aes_gcmp_256].len = 48; - pdev->rx_pn[htt_sec_type_tkip].cmp = pdev->rx_pn[htt_sec_type_tkip_nomic].cmp = pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48; - pdev->rx_pn[htt_sec_type_aes_ccmp_256].cmp = - pdev->rx_pn[htt_sec_type_aes_gcmp].cmp = - pdev->rx_pn[htt_sec_type_aes_gcmp_256].cmp = ol_rx_pn_cmp48; - /* WAPI: 128-bit PN */ pdev->rx_pn[htt_sec_type_wapi].len = 128; pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp; @@ -4026,26 +4008,6 @@ ol_txrx_peer_find_by_addr(struct ol_txrx_pdev_t *pdev, uint8_t *peer_mac_addr) return peer; } -void -ol_txrx_peer_flush_frags(ol_txrx_pdev_handle pdev, uint8_t vdev_id, - uint8_t *peer_mac) -{ - struct ol_txrx_peer_t *peer; - uint8_t peer_id; - - if (!pdev) - return; - - peer = ol_txrx_find_peer_by_addr_inc_ref(pdev, peer_mac, &peer_id); - - if (!peer) - return; - - ol_rx_reorder_peer_cleanup(peer->vdev, peer); - - OL_TXRX_PEER_UNREF_DELETE(peer); -} - /** * ol_txrx_dump_tx_desc() - dump tx desc total and free count * @txrx_pdev: Pointer to txrx pdev diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx_types.h b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx_types.h 
index e0210a6e8838..3203a75ebeb0 100644 --- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx_types.h +++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx_types.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -802,7 +802,7 @@ struct ol_txrx_pdev_t { struct { int (*cmp)(union htt_rx_pn_t *new, union htt_rx_pn_t *old, - int is_unicast, int opmode, bool strict_chk); + int is_unicast, int opmode); int len; } rx_pn[htt_num_sec_types]; diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c index 9acd8df823e7..e6526701a684 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2019, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -15447,12 +15447,6 @@ static int __wlan_hdd_cfg80211_add_key(struct wiphy *wiphy, QDF_STATUS qdf_ret_status; hdd_context_t *pHddCtx; hdd_ap_ctx_t *ap_ctx = WLAN_HDD_GET_AP_CTX_PTR(pAdapter); - ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX); - - if (!pdev) { - hdd_err("DP pdev is NULL"); - return -EINVAL; - } ENTER(); @@ -15616,10 +15610,6 @@ static int __wlan_hdd_cfg80211_add_key(struct wiphy *wiphy, setKey.keyDirection = eSIR_TX_RX; qdf_mem_copy(setKey.peerMac.bytes, mac_addr, QDF_MAC_ADDR_SIZE); } - - ol_txrx_peer_flush_frags(pdev, pAdapter->sessionId, - setKey.peerMac.bytes); - if ((QDF_IBSS_MODE == pAdapter->device_mode) && !pairwise) { /* if a key is already installed, block all subsequent ones */ if (pAdapter->sessionCtx.station.ibss_enc_key_installed) { diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ipa.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ipa.c index 2454b9daef3d..f2275fc5e09f 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ipa.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ipa.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2018, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -5778,40 +5778,6 @@ static enum hdd_ipa_forward_type hdd_ipa_intrabss_forward( return ret; } -/** - * wlan_ipa_eapol_intrabss_fwd_check() - Check if eapol pkt intrabss fwd is - * allowed or not - * @nbuf: network buffer - * @vdev_id: vdev id - * - * Return: true if intrabss fwd is allowed for eapol else false - */ -static bool -wlan_ipa_eapol_intrabss_fwd_check(qdf_nbuf_t nbuf, uint8_t vdev_id) -{ - ol_txrx_vdev_handle vdev; - uint8_t *vdev_mac_addr; - - vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id); - - if (!vdev) { - HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR, - "txrx vdev is NULL for vdev_id = %d", vdev_id); - return false; - } - - vdev_mac_addr = ol_txrx_get_vdev_mac_addr(vdev); - - if (!vdev_mac_addr) - return false; - - if (qdf_mem_cmp(qdf_nbuf_data(nbuf) + QDF_NBUF_DEST_MAC_OFFSET, - vdev_mac_addr, QDF_MAC_ADDR_SIZE)) - return false; - - return true; -} - /** * __hdd_ipa_w2i_cb() - WLAN to IPA callback handler * @priv: pointer to private data registered with IPA (we register a @@ -5832,12 +5798,6 @@ static void __hdd_ipa_w2i_cb(void *priv, enum ipa_dp_evt_type evt, struct hdd_ipa_iface_context *iface_context; uint8_t fw_desc; QDF_STATUS status = QDF_STATUS_SUCCESS; - bool is_eapol_wapi = false; - struct qdf_mac_addr peer_mac_addr = QDF_MAC_ADDR_ZERO_INITIALIZER; - uint8_t sta_idx; - ol_txrx_peer_handle peer; - ol_txrx_pdev_handle pdev; - hdd_station_ctx_t *sta_ctx; hdd_ipa = (struct hdd_ipa_priv *)priv; @@ -5861,13 +5821,6 @@ static void __hdd_ipa_w2i_cb(void *priv, enum ipa_dp_evt_type evt, return; } - pdev = cds_get_context(QDF_MODULE_ID_TXRX); - if (NULL == pdev) { - WMA_LOGE("%s: DP pdev is NULL", __func__); - kfree_skb(skb); - return; - } - if (hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) { session_id = (uint8_t)skb->cb[0]; iface_id = hdd_ipa->vdev_to_iface[session_id]; @@ -5910,52 +5863,6 @@ static void 
__hdd_ipa_w2i_cb(void *priv, enum ipa_dp_evt_type evt, skb_pull(skb, HDD_IPA_WLAN_CLD_HDR_LEN); } - if (iface_context->adapter->device_mode == QDF_STA_MODE) { - sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR( - iface_context->adapter); - qdf_copy_macaddr(&peer_mac_addr, - &sta_ctx->conn_info.bssId); - } else if (iface_context->adapter->device_mode - == QDF_SAP_MODE) { - qdf_mem_copy(peer_mac_addr.bytes, qdf_nbuf_data(skb) + - QDF_NBUF_SRC_MAC_OFFSET, - QDF_MAC_ADDR_SIZE); - } - - if (qdf_nbuf_is_ipv4_eapol_pkt(skb)) { - is_eapol_wapi = true; - if (iface_context->adapter->device_mode == - QDF_SAP_MODE && - !wlan_ipa_eapol_intrabss_fwd_check(skb, - iface_context->adapter->sessionId)) { - HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR, - "EAPOL intrabss fwd drop DA: %pM", - qdf_nbuf_data(skb) + - QDF_NBUF_DEST_MAC_OFFSET); - hdd_ipa->ipa_rx_internal_drop_count++; - kfree_skb(skb); - return; - } - } else if (qdf_nbuf_is_ipv4_wapi_pkt(skb)) { - is_eapol_wapi = true; - } - - peer = ol_txrx_find_peer_by_addr(pdev, peer_mac_addr.bytes, - &sta_idx); - - /* - * Check for peer auth state before allowing non-EAPOL/WAPI - * frames to be intrabss forwarded or submitted to stack. - */ - if (peer && ol_txrx_get_peer_state(peer) != - OL_TXRX_PEER_STATE_AUTH && !is_eapol_wapi) { - HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR, - "non-EAPOL/WAPI frame received when peer is unauthorized"); - hdd_ipa->ipa_rx_internal_drop_count++; - kfree_skb(skb); - return; - } - iface_context->stats.num_rx_ipa_excep++; /* Disable to forward Intra-BSS Rx packets when diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_nan_datapath.h b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_nan_datapath.h index 5b1353cf856b..72516e3f80e2 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_nan_datapath.h +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_nan_datapath.h @@ -1,6 +1,5 @@ /* * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. - * Copyright (c) 2021 Qualcomm Innovation Center, Inc. 
All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -36,6 +35,7 @@ struct wireless_dev; #define NAN_SOCIAL_CHANNEL_5GHZ_LOWER_BAND 44 #define NAN_SOCIAL_CHANNEL_5GHZ_UPPER_BAND 149 +#define NDP_APP_INFO_LEN 255 #define NDP_PMK_LEN 32 #define NDP_SCID_BUF_LEN 256 #define NDP_NUM_INSTANCE_ID 255 diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c index 92fe1d522878..c66a8ab5b65b 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2018, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -910,13 +910,6 @@ QDF_STATUS hdd_softap_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf) hdd_dhcp_indication(pAdapter, staid, skb, QDF_RX); - if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(skb) && - qdf_mem_cmp(qdf_nbuf_data(skb) + - QDF_NBUF_DEST_MAC_OFFSET, - pAdapter->macAddressCurrent.bytes, - QDF_MAC_ADDR_SIZE))) - return QDF_STATUS_E_FAILURE; - hdd_event_eapol_log(skb, QDF_RX); qdf_dp_trace_log_pkt(pAdapter->sessionId, skb, QDF_RX); DPTRACE(qdf_dp_trace(skb, diff --git a/drivers/staging/qcacld-3.0/core/mac/src/include/dot11f.h b/drivers/staging/qcacld-3.0/core/mac/src/include/dot11f.h index 1a603f547d7f..8c16f417604e 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/include/dot11f.h +++ b/drivers/staging/qcacld-3.0/core/mac/src/include/dot11f.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2019, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -26,7 +26,7 @@ * * * This file was automatically generated by 'framesc' - * Wed Sep 29 13:23:21 2021 from the following file(s): + * Mon Mar 25 14:48:07 2019 from the following file(s): * * dot11f.frms * diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_assoc_utils.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_assoc_utils.c index 5ac4c6f80577..027fe51b0fb6 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_assoc_utils.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_assoc_utils.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -1653,9 +1653,9 @@ lim_populate_peer_rate_set(tpAniSirGlobal pMac, { tSirMacRateSet tempRateSet; tSirMacRateSet tempRateSet2; - uint32_t i, j, val, min, isArate = 0; - uint8_t aRateIndex = 0; - uint8_t bRateIndex = 0; + uint32_t i, j, val, min, isArate; + + isArate = 0; /* copy operational rate set from psessionEntry */ if (psessionEntry->rateSet.numRates <= SIR_MAC_RATESET_EID_MAX) { @@ -1700,53 +1700,51 @@ lim_populate_peer_rate_set(tpAniSirGlobal pMac, * Sort rates in tempRateSet (they are likely to be already sorted) * put the result in pSupportedRates */ + { + uint8_t aRateIndex = 0; + uint8_t bRateIndex = 0; - qdf_mem_zero(pRates, sizeof(*pRates)); - for (i = 0; i < tempRateSet.numRates; i++) { - min = 0; - val = 0xff; - for (j = 0; (j < tempRateSet.numRates) && - (j < SIR_MAC_MAX_NUMBER_OF_RATES); j++) { - if ((uint32_t)(tempRateSet.rate[j] & 0x7f) < - val) { - val = tempRateSet.rate[j] & 0x7f; - min = j; - } - } - if (sirIsArate(tempRateSet.rate[min] & 0x7f)) { - isArate = 1; - } else if 
(sirIsBrate(tempRateSet.rate[min] & 0x7f)) { + qdf_mem_set((uint8_t *) pRates, sizeof(tSirSupportedRates), 0); + for (i = 0; i < tempRateSet.numRates; i++) { + min = 0; + val = 0xff; isArate = 0; - } else { - pe_debug("%d is neither 11a nor 11b rate", - tempRateSet.rate[min]); - tempRateSet.rate[min] = 0xff; - continue; - } - if (tempRateSet.rate[min] == pRates->llaRates[aRateIndex] || - tempRateSet.rate[min] == pRates->llbRates[bRateIndex]) { - pe_debug("Duplicate rate: %d", tempRateSet.rate[min]); - tempRateSet.rate[min] = 0xff; - continue; - } - /* - * HAL needs to know whether the rate is basic rate or not, - * as it needs to update the response rate table accordingly. - * e.g. if one of the 11a rates is basic rate, then that rate - * can be used for sending control frames. HAL updates the - * response rate table whenever basic rate set is changed. - */ - if (basicOnly && !(tempRateSet.rate[min] & 0x80)) { + for (j = 0; + (j < tempRateSet.numRates) + && (j < SIR_MAC_RATESET_EID_MAX); j++) { + if ((uint32_t) (tempRateSet.rate[j] & 0x7f) < + val) { + val = tempRateSet.rate[j] & 0x7f; + min = j; + } + } + if (sirIsArate(tempRateSet.rate[min] & 0x7f)) + isArate = 1; + /* + * HAL needs to know whether the rate is basic rate or not, as it needs to + * update the response rate table accordingly. e.g. if one of the 11a rates is + * basic rate, then that rate can be used for sending control frames. + * HAL updates the response rate table whenever basic rate set is changed. 
+ */ + if (basicOnly) { + if (tempRateSet.rate[min] & 0x80) { + if (isArate) + pRates->llaRates[aRateIndex++] = + tempRateSet.rate[min]; + else + pRates->llbRates[bRateIndex++] = + tempRateSet.rate[min]; + } + } else { + if (isArate) + pRates->llaRates[aRateIndex++] = + tempRateSet.rate[min]; + else + pRates->llbRates[bRateIndex++] = + tempRateSet.rate[min]; + } tempRateSet.rate[min] = 0xff; - continue; } - if (isArate && aRateIndex < SIR_NUM_11A_RATES) - pRates->llaRates[aRateIndex++] = - tempRateSet.rate[min]; - else if (bRateIndex < SIR_NUM_11B_RATES) - pRates->llbRates[bRateIndex++] = - tempRateSet.rate[min]; - tempRateSet.rate[min] = 0xff; } if (IS_DOT11_MODE_HT(psessionEntry->dot11mode)) { diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_assoc_req_frame.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_assoc_req_frame.c index d26345c9c6e8..75952720f3b9 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_assoc_req_frame.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_assoc_req_frame.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -2250,9 +2250,19 @@ static void fill_mlm_assoc_ind_vht(tpSirAssocReq assocreq, } } -QDF_STATUS lim_send_mlm_assoc_ind(tpAniSirGlobal mac_ctx, - tpDphHashNode sta_ds, - tpPESession session_entry) +/** + * lim_send_mlm_assoc_ind() - Sends assoc indication to SME + * @mac_ctx: Global Mac context + * @sta_ds: Station DPH hash entry + * @session_entry: PE session entry + * + * This function sends either LIM_MLM_ASSOC_IND + * or LIM_MLM_REASSOC_IND to SME. 
+ * + * Return: None + */ +void lim_send_mlm_assoc_ind(tpAniSirGlobal mac_ctx, + tpDphHashNode sta_ds, tpPESession session_entry) { tpLimMlmAssocInd assoc_ind = NULL; tpSirAssocReq assoc_req; @@ -2265,7 +2275,7 @@ QDF_STATUS lim_send_mlm_assoc_ind(tpAniSirGlobal mac_ctx, if (!session_entry->parsedAssocReq) { pe_err(" Parsed Assoc req is NULL"); - return QDF_STATUS_E_INVAL; + return; } /* Get a copy of the already parsed Assoc Request */ @@ -2274,7 +2284,7 @@ QDF_STATUS lim_send_mlm_assoc_ind(tpAniSirGlobal mac_ctx, if (!assoc_req) { pe_err("assoc req for assoc_id:%d is NULL", sta_ds->assocId); - return QDF_STATUS_E_INVAL; + return; } /* Get the phy_mode */ @@ -2299,7 +2309,7 @@ QDF_STATUS lim_send_mlm_assoc_ind(tpAniSirGlobal mac_ctx, lim_release_peer_idx(mac_ctx, sta_ds->assocId, session_entry); pe_err("AllocateMemory failed for assoc_ind"); - return QDF_STATUS_E_NOMEM; + return; } qdf_mem_copy((uint8_t *) assoc_ind->peerMacAddr, (uint8_t *) sta_ds->staAddr, sizeof(tSirMacAddr)); @@ -2352,7 +2362,7 @@ QDF_STATUS lim_send_mlm_assoc_ind(tpAniSirGlobal mac_ctx, pe_err("rsnIEdata index out of bounds: %d", rsn_len); qdf_mem_free(assoc_ind); - return QDF_STATUS_E_INVAL; + return; } assoc_ind->rsnIE.rsnIEdata[rsn_len] = SIR_MAC_WPA_EID; @@ -2504,5 +2514,5 @@ QDF_STATUS lim_send_mlm_assoc_ind(tpAniSirGlobal mac_ctx, (uint32_t *) assoc_ind); qdf_mem_free(assoc_ind); } - return QDF_STATUS_SUCCESS; + return; } diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_fils.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_fils.c index 2f29ab289afa..4877816d63c0 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_fils.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_fils.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -1819,11 +1819,6 @@ QDF_STATUS aead_decrypt_assoc_rsp(tpAniSirGlobal mac_ctx, uint8_t *fils_ies; struct pe_fils_session *fils_info = (session->fils_info); - if (*n_frame < FIXED_PARAM_OFFSET_ASSOC_RSP) { - pe_debug("payload len is less than ASSOC RES offset"); - return QDF_STATUS_E_FAILURE; - } - status = find_ie_data_after_fils_session_ie(mac_ctx, p_frame + FIXED_PARAM_OFFSET_ASSOC_RSP, ((*n_frame) - diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_mlm_rsp_messages.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_mlm_rsp_messages.c index 6ad4dba6e668..5110ecd9432a 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_mlm_rsp_messages.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_mlm_rsp_messages.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -1977,15 +1977,7 @@ void lim_process_ap_mlm_add_sta_rsp(tpAniSirGlobal pMac, tpSirMsgQ limMsgQ, * 2) PE receives eWNI_SME_ASSOC_CNF from SME * 3) BTAMP-AP sends Re/Association Response to BTAMP-STA */ - if (lim_send_mlm_assoc_ind(pMac, pStaDs, psessionEntry) != - QDF_STATUS_SUCCESS) { - lim_reject_association(pMac, pStaDs->staAddr, - pStaDs->mlmStaContext.subType, - true, pStaDs->mlmStaContext.authType, - pStaDs->assocId, true, - eSIR_MAC_UNSPEC_FAILURE_STATUS, - psessionEntry); - } + lim_send_mlm_assoc_ind(pMac, pStaDs, psessionEntry); /* fall though to reclaim the original Add STA Response message */ end: if (0 != limMsgQ->bodyptr) { diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_types.h b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_types.h index 144395e096c7..8caf092c4af5 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_types.h +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_types.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -457,21 +457,8 @@ tSirRetStatus lim_process_auth_frame_no_session(tpAniSirGlobal pMac, uint8_t *, void *body); void lim_process_assoc_req_frame(tpAniSirGlobal, uint8_t *, uint8_t, tpPESession); - -/** - * lim_send_mlm_assoc_ind() - Sends assoc indication to SME - * @mac_ctx: Global Mac context - * @sta_ds: Station DPH hash entry - * @session_entry: PE session entry - * - * This function sends either LIM_MLM_ASSOC_IND - * or LIM_MLM_REASSOC_IND to SME. 
- * - * Return: QDF_STATUS - */ -QDF_STATUS lim_send_mlm_assoc_ind(tpAniSirGlobal mac_ctx, - tpDphHashNode sta_ds, - tpPESession session_entry); +void lim_send_mlm_assoc_ind(tpAniSirGlobal pMac, tpDphHashNode pStaDs, + tpPESession psessionEntry); void lim_process_assoc_rsp_frame(tpAniSirGlobal, uint8_t *, uint8_t, tpPESession); void lim_process_disassoc_frame(tpAniSirGlobal, uint8_t *, tpPESession); diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c index 66fef4583226..fe196f7d9a90 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/sch/sch_beacon_gen.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/sch/sch_beacon_gen.c index 55709e876716..511b07cf699f 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/sch/sch_beacon_gen.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/sch/sch_beacon_gen.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2018, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -156,7 +156,7 @@ sch_append_addn_ie(tpAniSirGlobal mac_ctx, tpPESession session, /** * sch_get_csa_ecsa_count_offset() - get the offset of Switch count field - * @ie: pointer to the beginning of IEs in the beacon frame buffer + * @ie: pointer to the beggining of IEs in the beacon frame buffer * @ie_len: length of the IEs in the buffer * @csa_count_offset: pointer to the csa_count_offset variable in the caller * @ecsa_count_offset: pointer to the ecsa_count_offset variable in the caller @@ -195,9 +195,6 @@ static void sch_get_csa_ecsa_count_offset(uint8_t *ie, uint32_t ie_len, *ecsa_count_offset = offset + SCH_ECSA_SWITCH_COUNT_OFFSET; - if (ie_len < elem_len) - return; - ie_len -= elem_len; offset += elem_len; ptr += (elem_len + 2); diff --git a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/dot11f.c b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/dot11f.c index 55781cf9a8e0..391d1b8801eb 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/dot11f.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/dot11f.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2019, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -24,7 +24,7 @@ * * * This file was automatically generated by 'framesc' - * Wed Sep 29 13:23:21 2021 from the following file(s): + * Mon Mar 25 14:48:07 2019 from the following file(s): * * dot11f.frms * @@ -335,7 +335,7 @@ static uint32_t get_container_ies_len(tpAniSirGlobal pCtx, len += *(pBufRemaining+1); pBufRemaining += len + 2; len += 2; - while (len + 1 < nBuf) { + while (len < nBuf) { pIe = find_ie_defn(pCtx, pBufRemaining, nBuf - len, IEs); if (NULL == pIe) break; @@ -13659,30 +13659,25 @@ static uint32_t unpack_tlv_core(tpAniSirGlobal pCtx, } /* & length, */ if (pTlv->sLen == 2) { + framesntohs(pCtx, &len, pBufRemaining, pTlv->fMsb); if (2 > nBufRemaining) { FRAMES_LOG0(pCtx, FRLOGE, FRFL("This frame reports " "fewer two byte(s) remaining.\n")); status |= DOT11F_INCOMPLETE_TLV; FRAMES_DBG_BREAK(); goto MandatoryCheck; - } - framesntohs(pCtx, &len, pBufRemaining, pTlv->fMsb); - pBufRemaining += 2; - nBufRemaining -= 2; + } + pBufRemaining += 2; + nBufRemaining -= 2; } else { len = *pBufRemaining; pBufRemaining += 1; nBufRemaining -= 1; } } else { - if (TLVs[0].sType > nBufRemaining) { - FRAMES_LOG0(pCtx, FRLOGE, FRFL("This frame reports " - "fewer LVs[0].sType byte(s) remaining.\n")); - status |= DOT11F_INCOMPLETE_TLV; - goto MandatoryCheck; - } pBufRemaining += TLVs[0].sType; nBufRemaining -= TLVs[0].sType; + framesntohs(pCtx, &len, pBufRemaining, (TLVs[0].sType == 2)); if (2 > nBufRemaining) { FRAMES_LOG0(pCtx, FRLOGE, FRFL("This frame reports " "fewer two byte(s) remaining.\n")); @@ -13690,7 +13685,6 @@ static uint32_t unpack_tlv_core(tpAniSirGlobal pCtx, FRAMES_DBG_BREAK(); goto MandatoryCheck; } - framesntohs(pCtx, &len, pBufRemaining, (TLVs[0].sType == 2)); pBufRemaining += 2; nBufRemaining -= 2; } diff --git a/drivers/staging/qcacld-3.0/core/wma/inc/wma.h b/drivers/staging/qcacld-3.0/core/wma/inc/wma.h 
index 79f812017119..5728194c58d6 100644 --- a/drivers/staging/qcacld-3.0/core/wma/inc/wma.h +++ b/drivers/staging/qcacld-3.0/core/wma/inc/wma.h @@ -1,6 +1,5 @@ /* * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -56,7 +55,6 @@ #define WMA_RESUME_TIMEOUT 6000 #define MAX_MEM_CHUNKS 32 #define NAN_CLUSTER_ID_BYTES 4 -#define NDP_APP_INFO_LEN 255 #define WMA_CRASH_INJECT_TIMEOUT 5000 diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_mgmt.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_mgmt.c index ad1017e9bd7b..60968b3a853e 100644 --- a/drivers/staging/qcacld-3.0/core/wma/src/wma_mgmt.c +++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_mgmt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019, 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -2371,22 +2371,8 @@ static QDF_STATUS wma_unified_bcn_tmpl_send(tp_wma_handle wma, tmpl_len = *(uint32_t *) &bcn_info->beacon[0]; else tmpl_len = bcn_info->beaconLength; - - if (tmpl_len > WMI_BEACON_TX_BUFFER_SIZE) { - WMA_LOGE("tmpl_len: %d > %d. 
Invalid tmpl len", tmpl_len, - WMI_BEACON_TX_BUFFER_SIZE); - return -EINVAL; - } - - if (p2p_ie_len) { - if (tmpl_len <= p2p_ie_len) { - WMA_LOGE("tmpl_len %d <= p2p_ie_len %d, Invalid", - tmpl_len, p2p_ie_len); - return -EINVAL; - } + if (p2p_ie_len) tmpl_len -= (uint32_t) p2p_ie_len; - } - frm = bcn_info->beacon + bytes_to_strip; tmpl_len_aligned = roundup(tmpl_len, sizeof(A_UINT32)); /* diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_nan_datapath.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_nan_datapath.c index 6702ef10c3c8..dff36acb30ee 100644 --- a/drivers/staging/qcacld-3.0/core/wma/src/wma_nan_datapath.c +++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_nan_datapath.c @@ -1,6 +1,5 @@ /* * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved. - * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -721,11 +720,6 @@ static int wma_ndp_confirm_event_handler(void *handle, uint8_t *event_info, WMA_LOGE(FL("malloc failed")); return QDF_STATUS_E_NOMEM; } - - if (ndp_confirm.ndp_info.ndp_app_info_len > NDP_APP_INFO_LEN) - ndp_confirm.ndp_info.ndp_app_info_len = - NDP_APP_INFO_LEN; - qdf_mem_copy(&ndp_confirm.ndp_info.ndp_app_info, event->ndp_app_info, ndp_confirm.ndp_info.ndp_app_info_len); diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c index d14a2313fcce..f5cedbbc552a 100644 --- a/drivers/staging/rtl8188eu/core/rtw_ap.c +++ b/drivers/staging/rtl8188eu/core/rtw_ap.c @@ -921,7 +921,6 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) /* SSID */ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_)); if (p && ie_len > 0) { - ie_len = min_t(int, ie_len, sizeof(pbss_network->Ssid.Ssid)); memset(&pbss_network->Ssid, 0, sizeof(struct ndis_802_11_ssid)); 
memcpy(pbss_network->Ssid.Ssid, (p + 2), ie_len); pbss_network->Ssid.SsidLength = ie_len; @@ -940,7 +939,6 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) /* get supported rates */ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_)); if (p != NULL) { - ie_len = min_t(int, ie_len, NDIS_802_11_LENGTH_RATES_EX); memcpy(supportRate, p+2, ie_len); supportRateNum = ie_len; } @@ -948,8 +946,6 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) /* get ext_supported rates */ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->IELength - _BEACON_IE_OFFSET_); if (p != NULL) { - ie_len = min_t(int, ie_len, - NDIS_802_11_LENGTH_RATES_EX - supportRateNum); memcpy(supportRate+supportRateNum, p+2, ie_len); supportRateNum += ie_len; } @@ -1065,7 +1061,6 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) pht_cap->supp_mcs_set[0] = 0xff; pht_cap->supp_mcs_set[1] = 0x0; } - ie_len = min_t(int, ie_len, sizeof(pmlmepriv->htpriv.ht_cap)); memcpy(&pmlmepriv->htpriv.ht_cap, p+2, ie_len); } diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c index c29dc9182470..2a6192e08b75 100644 --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c @@ -1174,11 +1174,9 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a, break; } sec_len = *(pos++); len -= 1; - if (sec_len > 0 && - sec_len <= len && - sec_len <= 32) { + if (sec_len > 0 && sec_len <= len) { ssid[ssid_index].SsidLength = sec_len; - memcpy(ssid[ssid_index].Ssid, pos, sec_len); + memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength); ssid_index++; } pos += sec_len; diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 27a7d448ff61..9e678664d573 100644 --- 
a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -53,7 +53,6 @@ static struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */ {USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ - {USB_DEVICE(0x7392, 0xb811)}, /* Edimax EW-7811UN V2 */ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ {} /* Terminating entry */ }; diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c index c29d74cf9520..fa7a89357d64 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c @@ -2712,14 +2712,13 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev) free_irq(dev->irq, dev); priv->irq = 0; } + free_rtllib(dev); if (dev->mem_start != 0) { iounmap((void __iomem *)dev->mem_start); release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); } - - free_rtllib(dev); } else { priv = rtllib_priv(dev); } diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c index 6f0be1db6fb1..70df6a1485d6 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c @@ -419,10 +419,9 @@ static int _rtl92e_wx_set_scan(struct net_device *dev, struct iw_scan_req *req = (struct iw_scan_req *)b; if (req->essid_len) { - int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); - - ieee->current_network.ssid_len = len; - memcpy(ieee->current_network.ssid, req->essid, len); + ieee->current_network.ssid_len = req->essid_len; + memcpy(ieee->current_network.ssid, req->essid, + req->essid_len); } } diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h index b9e978e895c6..563ac12f0b2c 100644 --- a/drivers/staging/rtl8192e/rtllib.h +++ b/drivers/staging/rtl8192e/rtllib.h @@ -1160,7 +1160,7 @@ struct rtllib_network 
{ bool bWithAironetIE; bool bCkipSupported; bool bCcxRmEnable; - u8 CcxRmState[2]; + u16 CcxRmState[2]; bool bMBssidValid; u8 MBssidMask; u8 MBssid[ETH_ALEN]; diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c index 6921e036a828..37343ec3b484 100644 --- a/drivers/staging/rtl8192e/rtllib_rx.c +++ b/drivers/staging/rtl8192e/rtllib_rx.c @@ -1988,7 +1988,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee, info_element->data[2] == 0x96 && info_element->data[3] == 0x01) { if (info_element->len == 6) { - memcpy(network->CcxRmState, &info_element->data[4], 2); + memcpy(network->CcxRmState, &info_element[4], 2); if (network->CcxRmState[0] != 0) network->bCcxRmEnable = true; else diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index a8a4e45a2e51..1e0d2a33787e 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -267,7 +267,7 @@ void write_nic_byte_E(struct net_device *dev, int indx, u8 data) status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE, - indx | 0xfe00, 0, usbdata, 1, 500); + indx | 0xfe00, 0, usbdata, 1, HZ / 2); kfree(usbdata); if (status < 0) @@ -287,7 +287,7 @@ int read_nic_byte_E(struct net_device *dev, int indx, u8 *data) status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), RTL8187_REQ_GET_REGS, RTL8187_REQT_READ, - indx | 0xfe00, 0, usbdata, 1, 500); + indx | 0xfe00, 0, usbdata, 1, HZ / 2); *data = *usbdata; kfree(usbdata); @@ -314,7 +314,7 @@ void write_nic_byte(struct net_device *dev, int indx, u8 data) status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE, (indx & 0xff) | 0xff00, (indx >> 8) & 0x0f, - usbdata, 1, 500); + usbdata, 1, HZ / 2); kfree(usbdata); if (status < 0) @@ -340,7 +340,7 @@ void write_nic_word(struct net_device *dev, int indx, u16 data) status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 
RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE, (indx & 0xff) | 0xff00, (indx >> 8) & 0x0f, - usbdata, 2, 500); + usbdata, 2, HZ / 2); kfree(usbdata); if (status < 0) @@ -365,7 +365,7 @@ void write_nic_dword(struct net_device *dev, int indx, u32 data) status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE, (indx & 0xff) | 0xff00, (indx >> 8) & 0x0f, - usbdata, 4, 500); + usbdata, 4, HZ / 2); kfree(usbdata); @@ -390,7 +390,7 @@ int read_nic_byte(struct net_device *dev, int indx, u8 *data) status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), RTL8187_REQ_GET_REGS, RTL8187_REQT_READ, (indx & 0xff) | 0xff00, (indx >> 8) & 0x0f, - usbdata, 1, 500); + usbdata, 1, HZ / 2); *data = *usbdata; kfree(usbdata); @@ -417,7 +417,7 @@ int read_nic_word(struct net_device *dev, int indx, u16 *data) status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), RTL8187_REQ_GET_REGS, RTL8187_REQT_READ, (indx & 0xff) | 0xff00, (indx >> 8) & 0x0f, - usbdata, 2, 500); + usbdata, 2, HZ / 2); *data = *usbdata; kfree(usbdata); @@ -441,7 +441,7 @@ static int read_nic_word_E(struct net_device *dev, int indx, u16 *data) status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), RTL8187_REQ_GET_REGS, RTL8187_REQT_READ, - indx | 0xfe00, 0, usbdata, 2, 500); + indx | 0xfe00, 0, usbdata, 2, HZ / 2); *data = *usbdata; kfree(usbdata); @@ -467,7 +467,7 @@ int read_nic_dword(struct net_device *dev, int indx, u32 *data) status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), RTL8187_REQ_GET_REGS, RTL8187_REQT_READ, (indx & 0xff) | 0xff00, (indx >> 8) & 0x0f, - usbdata, 4, 500); + usbdata, 4, HZ / 2); *data = *usbdata; kfree(usbdata); @@ -3418,7 +3418,7 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum, u32 *TotalRxDataNum) { u16 SlotIndex; - u16 i; + u8 i; *TotalRxBcnNum = 0; *TotalRxDataNum = 0; diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c index 8fb194075149..4911fef2e2e5 100644 --- 
a/drivers/staging/rtl8192u/r8192U_wx.c +++ b/drivers/staging/rtl8192u/r8192U_wx.c @@ -341,10 +341,8 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a, struct iw_scan_req *req = (struct iw_scan_req *)b; if (req->essid_len) { - int len = min_t(int, req->essid_len, IW_ESSID_MAX_SIZE); - - ieee->current_network.ssid_len = len; - memcpy(ieee->current_network.ssid, req->essid, len); + ieee->current_network.ssid_len = req->essid_len; + memcpy(ieee->current_network.ssid, req->essid, req->essid_len); } } diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c index b4eb6af4e565..562a10203127 100644 --- a/drivers/staging/rtl8712/rtl871x_cmd.c +++ b/drivers/staging/rtl8712/rtl871x_cmd.c @@ -242,10 +242,8 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter, psurveyPara->ss_ssidlen = 0; memset(psurveyPara->ss_ssid, 0, IW_ESSID_MAX_SIZE + 1); if ((pssid != NULL) && (pssid->SsidLength)) { - int len = min_t(int, pssid->SsidLength, IW_ESSID_MAX_SIZE); - - memcpy(psurveyPara->ss_ssid, pssid->Ssid, len); - psurveyPara->ss_ssidlen = cpu_to_le32(len); + memcpy(psurveyPara->ss_ssid, pssid->Ssid, pssid->SsidLength); + psurveyPara->ss_ssidlen = cpu_to_le32(pssid->SsidLength); } set_fwstate(pmlmepriv, _FW_UNDER_SURVEY); r8712_enqueue_cmd(pcmdpriv, ph2c); diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c index 20052fa2eedf..2b348439242f 100644 --- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c +++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c @@ -935,7 +935,7 @@ static int r871x_wx_set_priv(struct net_device *dev, struct iw_point *dwrq = (struct iw_point *)awrq; len = dwrq->length; - ext = strndup_user(dwrq->pointer, len); + ext = memdup_user(dwrq->pointer, len); if (IS_ERR(ext)) return PTR_ERR(ext); diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c index fd4f1859cdae..489a9e6d52fc 100644 --- 
a/drivers/staging/rtl8712/usb_ops_linux.c +++ b/drivers/staging/rtl8712/usb_ops_linux.c @@ -511,7 +511,7 @@ int r8712_usbctrl_vendorreq(struct intf_priv *pintfpriv, u8 request, u16 value, memcpy(pIo_buf, pdata, len); } status = usb_control_msg(udev, pipe, request, reqtype, value, index, - pIo_buf, len, 500); + pIo_buf, len, HZ / 2); if (status > 0) { /* Success this control transfer. */ if (requesttype == 0x01) { /* For Control read transfer, we have to copy the read diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c index 71aa623facd6..09063b82326f 100644 --- a/drivers/staging/speakup/speakup_dectlk.c +++ b/drivers/staging/speakup/speakup_dectlk.c @@ -51,7 +51,7 @@ static unsigned char get_index(void); static int in_escape; static int is_flushing; -static DEFINE_SPINLOCK(flush_lock); +static spinlock_t flush_lock; static DECLARE_WAIT_QUEUE_HEAD(flush); static struct var_t vars[] = { diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index b2a76ecb5789..761b065a40bb 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -452,9 +452,6 @@ static bool iscsit_tpg_check_network_portal( break; } spin_unlock(&tpg->tpg_np_lock); - - if (match) - break; } spin_unlock(&tiqn->tiqn_tpg_lock); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index f1e09e7704af..1fe782f9ee81 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -1735,6 +1735,7 @@ int core_alua_set_tg_pt_gp_id( pr_err("Maximum ALUA alua_tg_pt_gps_count:" " 0x0000ffff reached\n"); spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); return -ENOSPC; } again: diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 7a8da5758be8..4198ed4ac607 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ 
-800,8 +800,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) INIT_LIST_HEAD(&dev->t10_alua.lba_map_list); spin_lock_init(&dev->t10_alua.lba_map_lock); - INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work); - dev->t10_wwn.t10_dev = dev; dev->t10_alua.t10_dev = dev; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index c0ee5e1f2b10..272e6f755322 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -146,7 +146,6 @@ void transport_clear_lun_ref(struct se_lun *); void transport_send_task_abort(struct se_cmd *); sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); void target_qf_do_work(struct work_struct *work); -void target_do_delayed_work(struct work_struct *work); bool target_check_wce(struct se_device *dev); bool target_check_fua(struct se_device *dev); void __target_execute_cmd(struct se_cmd *, bool); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 0ce3697ecbd7..d72a4058fd08 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -629,9 +629,8 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, unsigned char *buf; buf = transport_kmap_data_sg(cmd); - if (!buf) { + if (!buf) ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */ - } if (cdb[0] == MODE_SENSE_10) { if (!(buf[3] & 0x80)) diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index a2ffa10e5a41..608117819366 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -37,7 +37,7 @@ #include "target_core_alua.h" static sense_reason_t -sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool); +sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool); static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd); static sense_reason_t @@ -311,14 +311,14 @@ 
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) } static sense_reason_t -sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops) +sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) { struct se_device *dev = cmd->se_dev; sector_t end_lba = dev->transport->get_blocks(dev) + 1; unsigned int sectors = sbc_get_write_same_sectors(cmd); sense_reason_t ret; - if ((flags & 0x04) || (flags & 0x02)) { + if ((flags[0] & 0x04) || (flags[0] & 0x02)) { pr_err("WRITE_SAME PBDATA and LBDATA" " bits not supported for Block Discard" " Emulation\n"); @@ -340,7 +340,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *op } /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ - if (flags & 0x10) { + if (flags[0] & 0x10) { pr_warn("WRITE SAME with ANCHOR not supported\n"); return TCM_INVALID_CDB_FIELD; } @@ -348,7 +348,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *op * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting * translated into block discard requests within backend code. 
*/ - if (flags & 0x08) { + if (flags[0] & 0x08) { if (!ops->execute_unmap) return TCM_UNSUPPORTED_SCSI_OPCODE; @@ -363,7 +363,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *op if (!ops->execute_write_same) return TCM_UNSUPPORTED_SCSI_OPCODE; - ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true); + ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true); if (ret) return ret; @@ -721,9 +721,10 @@ sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_ } static sense_reason_t -sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect, +sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, u32 sectors, bool is_write) { + u8 protect = cdb[1] >> 5; int sp_ops = cmd->se_sess->sup_prot_ops; int pi_prot_type = dev->dev_attrib.pi_prot_type; bool fabric_prot = false; @@ -771,7 +772,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect, /* Fallthrough */ default: pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " - "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect); + "PROTECT: 0x%02x\n", cdb[0], protect); return TCM_INVALID_CDB_FIELD; } @@ -846,7 +847,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); + ret = sbc_check_prot(dev, cmd, cdb, sectors, false); if (ret) return ret; @@ -860,7 +861,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); + ret = sbc_check_prot(dev, cmd, cdb, sectors, false); if (ret) return ret; @@ -874,7 +875,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); + ret = sbc_check_prot(dev, 
cmd, cdb, sectors, false); if (ret) return ret; @@ -895,7 +896,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); + ret = sbc_check_prot(dev, cmd, cdb, sectors, true); if (ret) return ret; @@ -909,7 +910,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); + ret = sbc_check_prot(dev, cmd, cdb, sectors, true); if (ret) return ret; @@ -923,7 +924,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (sbc_check_dpofua(dev, cmd, cdb)) return TCM_INVALID_CDB_FIELD; - ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); + ret = sbc_check_prot(dev, cmd, cdb, sectors, true); if (ret) return ret; @@ -982,7 +983,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) size = sbc_get_size(cmd, 1); cmd->t_task_lba = get_unaligned_be64(&cdb[12]); - ret = sbc_setup_write_same(cmd, cdb[10], ops); + ret = sbc_setup_write_same(cmd, &cdb[10], ops); if (ret) return ret; break; @@ -1075,7 +1076,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) size = sbc_get_size(cmd, 1); cmd->t_task_lba = get_unaligned_be64(&cdb[2]); - ret = sbc_setup_write_same(cmd, cdb[1], ops); + ret = sbc_setup_write_same(cmd, &cdb[1], ops); if (ret) return ret; break; @@ -1093,7 +1094,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) * Follow sbcr26 with WRITE_SAME (10) and check for the existence * of byte 1 bit 3 UNMAP instead of original reserved field */ - ret = sbc_setup_write_same(cmd, cdb[1], ops); + ret = sbc_setup_write_same(cmd, &cdb[1], ops); if (ret) return ret; break; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 75b29458c4da..7199bac67333 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ 
-1730,10 +1730,6 @@ void transport_generic_request_failure(struct se_cmd *cmd, case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: - case TCM_TOO_MANY_TARGET_DESCS: - case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: - case TCM_TOO_MANY_SEGMENT_DESCS: - case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: break; case TCM_OUT_OF_RESOURCES: sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -1877,35 +1873,32 @@ static bool target_handle_task_attr(struct se_cmd *cmd) */ switch (cmd->sam_task_attr) { case TCM_HEAD_TAG: - atomic_inc_mb(&dev->non_ordered); pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", cmd->t_task_cdb[0]); return false; case TCM_ORDERED_TAG: - atomic_inc_mb(&dev->delayed_cmd_count); + atomic_inc_mb(&dev->dev_ordered_sync); pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", cmd->t_task_cdb[0]); + + /* + * Execute an ORDERED command if no other older commands + * exist that need to be completed first. + */ + if (!atomic_read(&dev->simple_cmds)) + return false; break; default: /* * For SIMPLE and UNTAGGED Task Attribute commands */ - atomic_inc_mb(&dev->non_ordered); - - if (atomic_read(&dev->delayed_cmd_count) == 0) - return false; + atomic_inc_mb(&dev->simple_cmds); break; } - if (cmd->sam_task_attr != TCM_ORDERED_TAG) { - atomic_inc_mb(&dev->delayed_cmd_count); - /* - * We will account for this when we dequeue from the delayed - * list. 
- */ - atomic_dec_mb(&dev->non_ordered); - } + if (atomic_read(&dev->dev_ordered_sync) == 0) + return false; spin_lock(&dev->delayed_cmd_lock); list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); @@ -1913,12 +1906,6 @@ static bool target_handle_task_attr(struct se_cmd *cmd) pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn", cmd->t_task_cdb[0], cmd->sam_task_attr); - /* - * We may have no non ordered cmds when this function started or we - * could have raced with the last simple/head cmd completing, so kick - * the delayed handler here. - */ - schedule_work(&dev->delayed_cmd_work); return true; } @@ -1969,48 +1956,29 @@ EXPORT_SYMBOL(target_execute_cmd); * Process all commands up to the last received ORDERED task attribute which * requires another blocking boundary */ -void target_do_delayed_work(struct work_struct *work) +static void target_restart_delayed_cmds(struct se_device *dev) { - struct se_device *dev = container_of(work, struct se_device, - delayed_cmd_work); - - spin_lock(&dev->delayed_cmd_lock); - while (!dev->ordered_sync_in_progress) { + for (;;) { struct se_cmd *cmd; - if (list_empty(&dev->delayed_cmd_list)) + spin_lock(&dev->delayed_cmd_lock); + if (list_empty(&dev->delayed_cmd_list)) { + spin_unlock(&dev->delayed_cmd_lock); break; + } cmd = list_entry(dev->delayed_cmd_list.next, struct se_cmd, se_delayed_node); - - if (cmd->sam_task_attr == TCM_ORDERED_TAG) { - /* - * Check if we started with: - * [ordered] [simple] [ordered] - * and we are now at the last ordered so we have to wait - * for the simple cmd. 
- */ - if (atomic_read(&dev->non_ordered) > 0) - break; - - dev->ordered_sync_in_progress = true; - } - list_del(&cmd->se_delayed_node); - atomic_dec_mb(&dev->delayed_cmd_count); spin_unlock(&dev->delayed_cmd_lock); - if (cmd->sam_task_attr != TCM_ORDERED_TAG) - atomic_inc_mb(&dev->non_ordered); - cmd->transport_state |= CMD_T_SENT; __target_execute_cmd(cmd, true); - spin_lock(&dev->delayed_cmd_lock); + if (cmd->sam_task_attr == TCM_ORDERED_TAG) + break; } - spin_unlock(&dev->delayed_cmd_lock); } /* @@ -2028,19 +1996,16 @@ static void transport_complete_task_attr(struct se_cmd *cmd) goto restart; if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { - atomic_dec_mb(&dev->non_ordered); + atomic_dec_mb(&dev->simple_cmds); dev->dev_cur_ordered_id++; pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n", dev->dev_cur_ordered_id); } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { - atomic_dec_mb(&dev->non_ordered); dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", dev->dev_cur_ordered_id); } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { - spin_lock(&dev->delayed_cmd_lock); - dev->ordered_sync_in_progress = false; - spin_unlock(&dev->delayed_cmd_lock); + atomic_dec_mb(&dev->dev_ordered_sync); dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", @@ -2049,8 +2014,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; restart: - if (atomic_read(&dev->delayed_cmd_count) > 0) - schedule_work(&dev->delayed_cmd_work); + target_restart_delayed_cmds(dev); } static void transport_complete_qf(struct se_cmd *cmd) @@ -2789,7 +2753,9 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, __releases(&cmd->t_state_lock) __acquires(&cmd->t_state_lock) { - lockdep_assert_held(&cmd->t_state_lock); + + assert_spin_locked(&cmd->t_state_lock); + WARN_ON_ONCE(!irqs_disabled()); if (fabric_stop) cmd->transport_state |= CMD_T_FABRIC_STOP; @@ -2898,26 
+2864,6 @@ static const struct sense_info sense_info_table[] = { .key = ILLEGAL_REQUEST, .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ }, - [TCM_TOO_MANY_TARGET_DESCS] = { - .key = ILLEGAL_REQUEST, - .asc = 0x26, - .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ - }, - [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { - .key = ILLEGAL_REQUEST, - .asc = 0x26, - .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ - }, - [TCM_TOO_MANY_SEGMENT_DESCS] = { - .key = ILLEGAL_REQUEST, - .asc = 0x26, - .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ - }, - [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { - .key = ILLEGAL_REQUEST, - .asc = 0x26, - .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ - }, [TCM_PARAMETER_LIST_LENGTH_ERROR] = { .key = ILLEGAL_REQUEST, .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 958737512229..6415e9b09a52 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -52,87 +52,64 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) return 0; } -/** - * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers - * - * @se_dev: device being considered for match - * @dev_wwn: XCOPY requested NAA dev_wwn - * @return: 1 on match, 0 on no-match - */ -static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev, - const unsigned char *dev_wwn) +static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, + bool src) { - unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; + struct se_device *se_dev; + unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; int rc; - if (!se_dev->dev_attrib.emulate_3pc) { - pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev); - return 0; - } + if (src) + dev_wwn = &xop->dst_tid_wwn[0]; + else + dev_wwn = &xop->src_tid_wwn[0]; - memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); - 
target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); + mutex_lock(&g_device_mutex); + list_for_each_entry(se_dev, &g_device_list, g_dev_node) { - rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); - if (rc != 0) { - pr_debug("XCOPY: skip non-matching: %*ph\n", - XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn); - return 0; - } - pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); + if (!se_dev->dev_attrib.emulate_3pc) + continue; - return 1; -} + memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); + target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); -static int target_xcopy_locate_se_dev_e4(struct se_session *sess, - const unsigned char *dev_wwn, - struct se_device **_found_dev, - struct percpu_ref **_found_lun_ref) -{ - struct se_dev_entry *deve; - struct se_node_acl *nacl; - struct se_lun *this_lun = NULL; - struct se_device *found_dev = NULL; - - /* cmd with NULL sess indicates no associated $FABRIC_MOD */ - if (!sess) - goto err_out; - - pr_debug("XCOPY 0xe4: searching for: %*ph\n", - XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn); - - nacl = sess->se_node_acl; - rcu_read_lock(); - hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { - struct se_device *this_dev; - int rc; - - this_lun = rcu_dereference(deve->se_lun); - this_dev = rcu_dereference_raw(this_lun->lun_se_dev); - - rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn); - if (rc) { - if (percpu_ref_tryget_live(&this_lun->lun_ref)) - found_dev = this_dev; - break; + rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); + if (rc != 0) + continue; + + if (src) { + xop->dst_dev = se_dev; + pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located" + " se_dev\n", xop->dst_dev); + } else { + xop->src_dev = se_dev; + pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located" + " se_dev\n", xop->src_dev); + } + + rc = target_depend_item(&se_dev->dev_group.cg_item); + if (rc != 0) { + pr_err("configfs_depend_item attempt failed:" + " %d for se_dev: %p\n", rc, se_dev); + 
mutex_unlock(&g_device_mutex); + return rc; } + + pr_debug("Called configfs_depend_item for se_dev: %p" + " se_dev->se_dev_group: %p\n", se_dev, + &se_dev->dev_group); + + mutex_unlock(&g_device_mutex); + return 0; } - rcu_read_unlock(); - if (found_dev == NULL) - goto err_out; - - pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n", - found_dev, &found_dev->dev_group); - *_found_dev = found_dev; - *_found_lun_ref = &this_lun->lun_ref; - return 0; -err_out: + mutex_unlock(&g_device_mutex); + pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); return -EINVAL; } static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, - unsigned char *p, unsigned short cscd_index) + unsigned char *p, bool src) { unsigned char *desc = p; unsigned short ript; @@ -177,13 +154,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op return -EINVAL; } - if (cscd_index != xop->stdi && cscd_index != xop->dtdi) { - pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor " - "dest\n", cscd_index); - return 0; - } - - if (cscd_index == xop->stdi) { + if (src) { memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); /* * Determine if the source designator matches the local device @@ -195,15 +166,10 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source" " received xop\n", xop->src_dev); } - } - - if (cscd_index == xop->dtdi) { + } else { memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); /* - * Determine if the destination designator matches the local - * device. If @cscd_index corresponds to both source (stdi) and - * destination (dtdi), or dtdi comes after stdi, then - * XCOL_DEST_RECV_OP wins. 
+ * Determine if the destination designator matches the local device */ if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0], XCOPY_NAA_IEEE_REGEX_LEN)) { @@ -223,9 +189,9 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, { struct se_device *local_dev = se_cmd->se_dev; unsigned char *desc = p; - int offset = tdll % XCOPY_TARGET_DESC_LEN, rc; - unsigned short cscd_index = 0; + int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0; unsigned short start = 0; + bool src = true; *sense_ret = TCM_INVALID_PARAMETER_LIST; @@ -248,19 +214,25 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, while (start < tdll) { /* - * Check target descriptor identification with 0xE4 type, and - * compare the current index with the CSCD descriptor IDs in - * the segment descriptor. Use VPD 0x83 WWPN matching .. + * Check target descriptor identification with 0xE4 type with + * use VPD 0x83 WWPN matching .. */ switch (desc[0]) { case 0xe4: rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop, - &desc[0], cscd_index); + &desc[0], src); if (rc != 0) goto out; + /* + * Assume target descriptors are in source -> destination order.. 
+ */ + if (src) + src = false; + else + src = true; start += XCOPY_TARGET_DESC_LEN; desc += XCOPY_TARGET_DESC_LEN; - cscd_index++; + ret++; break; default: pr_err("XCOPY unsupported descriptor type code:" @@ -269,25 +241,10 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, } } - switch (xop->op_origin) { - case XCOL_SOURCE_RECV_OP: - rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, - xop->dst_tid_wwn, - &xop->dst_dev, - &xop->remote_lun_ref); - break; - case XCOL_DEST_RECV_OP: - rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess, - xop->src_tid_wwn, - &xop->src_dev, - &xop->remote_lun_ref); - break; - default: - pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - " - "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi); - rc = -EINVAL; - break; - } + if (xop->op_origin == XCOL_SOURCE_RECV_OP) + rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true); + else + rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false); /* * If a matching IEEE NAA 0x83 descriptor for the requested device * is not located on this node, return COPY_ABORTED with ASQ/ASQC @@ -304,7 +261,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n", xop->dst_dev, &xop->dst_tid_wwn[0]); - return cscd_index; + return ret; out: return -EINVAL; @@ -348,26 +305,17 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, struct xcopy_op *xop, unsigned char *p, - unsigned int sdll, sense_reason_t *sense_ret) + unsigned int sdll) { unsigned char *desc = p; unsigned int start = 0; int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0; - *sense_ret = TCM_INVALID_PARAMETER_LIST; - if (offset != 0) { pr_err("XCOPY segment descriptor list length is not" " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN); return -EINVAL; } - if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) { - pr_err("XCOPY 
supports %u segment descriptor(s), sdll: %u too" - " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll); - /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */ - *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS; - return -EINVAL; - } while (start < sdll) { /* @@ -424,12 +372,18 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) { + struct se_device *remote_dev; + if (xop->op_origin == XCOL_SOURCE_RECV_OP) - pr_debug("putting dst lun_ref for %p\n", xop->dst_dev); + remote_dev = xop->dst_dev; else - pr_debug("putting src lun_ref for %p\n", xop->src_dev); + remote_dev = xop->src_dev; - percpu_ref_put(xop->remote_lun_ref); + pr_debug("Calling configfs_undepend_item for" + " remote_dev: %p remote_dev->dev_group: %p\n", + remote_dev, &remote_dev->dev_group.cg_item); + + target_undepend_item(&remote_dev->dev_group.cg_item); } static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) @@ -939,20 +893,6 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, tdll, sdll, inline_dl); - /* - * skip over the target descriptors until segment descriptors - * have been passed - CSCD ids are needed to determine src and dest. 
- */ - seg_desc = &p[16] + tdll; - - rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, - sdll, &ret); - if (rc <= 0) - goto out; - - pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, - rc * XCOPY_SEGMENT_DESC_LEN); - rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret); if (rc <= 0) goto out; @@ -970,8 +910,18 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, rc * XCOPY_TARGET_DESC_LEN); + seg_desc = &p[16]; + seg_desc += (rc * XCOPY_TARGET_DESC_LEN); + + rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll); + if (rc <= 0) { + xcopy_pt_undepend_remotedev(xop); + goto out; + } transport_kunmap_data_sg(se_cmd); + pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc, + rc * XCOPY_SEGMENT_DESC_LEN); INIT_WORK(&xop->xop_work, target_xcopy_do_work); queue_work(xcopy_wq, &xop->xop_work); return TCM_NO_SENSE; diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h index 7db8d0c9223f..700a981c7b41 100644 --- a/drivers/target/target_core_xcopy.h +++ b/drivers/target/target_core_xcopy.h @@ -19,7 +19,6 @@ struct xcopy_op { struct se_device *dst_dev; unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; - struct percpu_ref *remote_lun_ref; sector_t src_lba; sector_t dst_lba; diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c index e701356441a9..34fe36504a55 100644 --- a/drivers/thermal/fair_share.c +++ b/drivers/thermal/fair_share.c @@ -93,8 +93,6 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip) int total_instance = 0; int cur_trip_level = get_trip_level(tz); - mutex_lock(&tz->lock); - list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if (instance->trip != trip) continue; @@ -121,8 +119,6 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip) 
instance->cdev->updated = false; thermal_cdev_update(cdev); } - - mutex_unlock(&tz->lock); return 0; } diff --git a/drivers/thermal/msm_lmh_dcvs.c b/drivers/thermal/msm_lmh_dcvs.c index cb6abf02c129..7e141f15a544 100644 --- a/drivers/thermal/msm_lmh_dcvs.c +++ b/drivers/thermal/msm_lmh_dcvs.c @@ -147,6 +147,7 @@ static uint32_t msm_lmh_mitigation_notify(struct msm_lmh_dcvs_hw *hw) rcu_read_unlock(); max_limit = FREQ_HZ_TO_KHZ(freq_val); + sched_update_cpu_freq_min_max(&hw->core_map, 0, max_limit); trace_lmh_dcvs_freq(cpumask_first(&hw->core_map), max_limit); notify_exit: diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index a4c0542f6141..16d45a25284f 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -1347,7 +1347,6 @@ static int exynos_tmu_probe(struct platform_device *pdev) data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk"); if (IS_ERR(data->sclk)) { dev_err(&pdev->dev, "Failed to get sclk\n"); - ret = PTR_ERR(data->sclk); goto err_clk; } else { ret = clk_prepare_enable(data->sclk); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index ba86dc7125e3..f51bedc538b5 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -2414,7 +2414,7 @@ unregister: EXPORT_SYMBOL_GPL(thermal_zone_device_register); /** - * thermal_zone_device_unregister - removes the registered thermal zone device + * thermal_device_unregister - removes the registered thermal zone device * @tz: the thermal zone device to remove */ void thermal_zone_device_unregister(struct thermal_zone_device *tz) diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index 4826a6db87b1..11725422dacb 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -49,8 +49,6 @@ struct xencons_info { struct xenbus_device *xbdev; struct xencons_interface *intf; unsigned int evtchn; - XENCONS_RING_IDX out_cons; - unsigned int out_cons_same; struct hvc_struct 
*hvc; int irq; int vtermno; @@ -100,11 +98,7 @@ static int __write_console(struct xencons_info *xencons, cons = intf->out_cons; prod = intf->out_prod; mb(); /* update queue values before going on */ - - if ((prod - cons) > sizeof(intf->out)) { - pr_err_once("xencons: Illegal ring page indices"); - return -EINVAL; - } + BUG_ON((prod - cons) > sizeof(intf->out)); while ((sent < len) && ((prod - cons) < sizeof(intf->out))) intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++]; @@ -132,10 +126,7 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len) */ while (len) { int sent = __write_console(cons, data, len); - - if (sent < 0) - return sent; - + data += sent; len -= sent; @@ -152,8 +143,6 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len) XENCONS_RING_IDX cons, prod; int recv = 0; struct xencons_info *xencons = vtermno_to_xencons(vtermno); - unsigned int eoiflag = 0; - if (xencons == NULL) return -EINVAL; intf = xencons->intf; @@ -161,11 +150,7 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len) cons = intf->in_cons; prod = intf->in_prod; mb(); /* get pointers before reading ring */ - - if ((prod - cons) > sizeof(intf->in)) { - pr_err_once("xencons: Illegal ring page indices"); - return -EINVAL; - } + BUG_ON((prod - cons) > sizeof(intf->in)); while (cons != prod && recv < len) buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)]; @@ -173,27 +158,7 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len) mb(); /* read ring before consuming */ intf->in_cons = cons; - /* - * When to mark interrupt having been spurious: - * - there was no new data to be read, and - * - the backend did not consume some output bytes, and - * - the previous round with no read data didn't see consumed bytes - * (we might have a race with an interrupt being in flight while - * updating xencons->out_cons, so account for that by allowing one - * round without any visible reason) - */ - if (intf->out_cons != 
xencons->out_cons) { - xencons->out_cons = intf->out_cons; - xencons->out_cons_same = 0; - } - if (recv) { - notify_daemon(xencons); - } else if (xencons->out_cons_same++ > 1) { - eoiflag = XEN_EOI_FLAG_SPURIOUS; - } - - xen_irq_lateeoi(xencons->irq, eoiflag); - + notify_daemon(xencons); return recv; } @@ -415,7 +380,7 @@ static int xencons_connect_backend(struct xenbus_device *dev, if (ret) return ret; info->evtchn = evtchn; - irq = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn); + irq = bind_evtchn_to_irq(evtchn); if (irq < 0) return irq; info->irq = irq; @@ -579,7 +544,7 @@ static int __init xen_hvc_init(void) return r; info = vtermno_to_xencons(HVC_COOKIE); - info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn); + info->irq = bind_evtchn_to_irq(info->evtchn); } if (info->irq < 0) info->irq = 0; /* NO_IRQ */ diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c index 3e29f5f0d4ca..a75146f600cb 100644 --- a/drivers/tty/hvc/hvsi.c +++ b/drivers/tty/hvc/hvsi.c @@ -1051,7 +1051,7 @@ static const struct tty_operations hvsi_ops = { static int __init hvsi_init(void) { - int i, ret; + int i; hvsi_driver = alloc_tty_driver(hvsi_count); if (!hvsi_driver) @@ -1082,25 +1082,12 @@ static int __init hvsi_init(void) } hvsi_wait = wait_for_state; /* irqs active now */ - ret = tty_register_driver(hvsi_driver); - if (ret) { - pr_err("Couldn't register hvsi console driver\n"); - goto err_free_irq; - } + if (tty_register_driver(hvsi_driver)) + panic("Couldn't register hvsi console driver\n"); printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count); return 0; -err_free_irq: - hvsi_wait = poll_for_state; - for (i = 0; i < hvsi_count; i++) { - struct hvsi_struct *hp = &hvsi_ports[i]; - - free_irq(hp->virq, hp); - } - tty_driver_kref_put(hvsi_driver); - - return ret; } device_initcall(hvsi_init); diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index ff58c5453b24..9b2beada2ff3 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -329,7 
+329,6 @@ static struct tty_driver *gsm_tty_driver; #define GSM1_ESCAPE_BITS 0x20 #define XON 0x11 #define XOFF 0x13 -#define ISO_IEC_646_MASK 0x7F static const struct tty_port_operations gsm_port_ops; @@ -444,7 +443,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) modembits |= MDM_RTR; if (dlci->modem_tx & TIOCM_RI) modembits |= MDM_IC; - if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator) + if (dlci->modem_tx & TIOCM_CD) modembits |= MDM_DV; return modembits; } @@ -548,8 +547,7 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len) int olen = 0; while (len--) { if (*input == GSM1_SOF || *input == GSM1_ESCAPE - || (*input & ISO_IEC_646_MASK) == XON - || (*input & ISO_IEC_646_MASK) == XOFF) { + || *input == XON || *input == XOFF) { *output++ = GSM1_ESCAPE; *output++ = *input++ ^ GSM1_ESCAPE_BITS; olen++; @@ -1506,7 +1504,7 @@ static void gsm_dlci_t1(unsigned long data) dlci->mode = DLCI_MODE_ADM; gsm_dlci_open(dlci); } else { - gsm_dlci_begin_close(dlci); /* prevent half open link */ + gsm_dlci_close(dlci); } break; diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index f584de719f5a..b74de014cef1 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1406,7 +1406,7 @@ handle_newline: put_tty_queue(c, ldata); smp_store_release(&ldata->canon_head, ldata->read_head); kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, POLLIN | POLLRDNORM); + wake_up_interruptible_poll(&tty->read_wait, POLLIN); return 0; } } @@ -1690,7 +1690,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, POLLIN | POLLRDNORM); + wake_up_interruptible_poll(&tty->read_wait, POLLIN); } } diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c index 1a3cc6ef4331..5cc80b80c82b 100644 --- a/drivers/tty/nozomi.c +++ b/drivers/tty/nozomi.c @@ 
-1437,7 +1437,7 @@ static int nozomi_card_init(struct pci_dev *pdev, NOZOMI_NAME, dc); if (unlikely(ret)) { dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq); - goto err_free_all_kfifo; + goto err_free_kfifo; } DBG1("base_addr: %p", dc->base_addr); @@ -1475,15 +1475,12 @@ static int nozomi_card_init(struct pci_dev *pdev, return 0; err_free_tty: - for (i--; i >= 0; i--) { + for (i = 0; i < MAX_PORT; ++i) { tty_unregister_device(ntty_driver, dc->index_start + i); tty_port_destroy(&dc->port[i].port); } - free_irq(pdev->irq, dc); -err_free_all_kfifo: - i = MAX_PORT; err_free_kfifo: - for (i--; i >= PORT_MDM; i--) + for (i = 0; i < MAX_PORT; i++) kfifo_free(&dc->port[i].fifo_ul); err_free_sbuf: kfree(dc->send_buf); diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index f3ed1eeaed4e..039837db65fc 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -607,7 +607,7 @@ static struct platform_driver dw8250_platform_driver = { .name = "dw-apb-uart", .pm = &dw8250_pm_ops, .of_match_table = dw8250_of_match, - .acpi_match_table = dw8250_acpi_match, + .acpi_match_table = ACPI_PTR(dw8250_acpi_match), }, .probe = dw8250_probe, .remove = dw8250_remove, diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c index 2ef2871722b7..2e3ea1a70d7b 100644 --- a/drivers/tty/serial/8250/8250_gsc.c +++ b/drivers/tty/serial/8250/8250_gsc.c @@ -30,7 +30,7 @@ static int __init serial_init_chip(struct parisc_device *dev) unsigned long address; int err; -#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC) +#ifdef CONFIG_64BIT if (!dev->irq && (dev->id.sversion == 0xad)) dev->irq = iosapic_serial_irq(dev); #endif diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 4bcb8dfa4874..0377b35d62b8 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -155,6 +155,11 @@ static void omap_8250_mdr1_errataset(struct 
uart_8250_port *up, struct omap8250_priv *priv) { u8 timeout = 255; + u8 old_mdr1; + + old_mdr1 = serial_in(up, UART_OMAP_MDR1); + if (old_mdr1 == priv->mdr1) + return; serial_out(up, UART_OMAP_MDR1, priv->mdr1); udelay(2); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 43f15ac54f0d..72f6cde146b5 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -78,7 +78,7 @@ static void moan_device(const char *str, struct pci_dev *dev) static int setup_port(struct serial_private *priv, struct uart_8250_port *port, - u8 bar, unsigned int offset, int regshift) + int bar, int offset, int regshift) { struct pci_dev *dev = priv->dev; @@ -5404,30 +5404,8 @@ static struct pci_device_id serial_pci_tbl[] = { { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ pbn_b2_4_115200 }, - /* Brainboxes Devices */ /* - * Brainboxes UC-101 - */ - { PCI_VENDOR_ID_INTASHIELD, 0x0BA1, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, - pbn_b2_2_115200 }, - /* - * Brainboxes UC-235/246 - */ - { PCI_VENDOR_ID_INTASHIELD, 0x0AA1, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, - pbn_b2_1_115200 }, - /* - * Brainboxes UC-257 - */ - { PCI_VENDOR_ID_INTASHIELD, 0x0861, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, - pbn_b2_2_115200 }, - /* - * Brainboxes UC-260/271/701/756 + * BrainBoxes UC-260 */ { PCI_VENDOR_ID_INTASHIELD, 0x0D21, PCI_ANY_ID, PCI_ANY_ID, @@ -5435,81 +5413,7 @@ static struct pci_device_id serial_pci_tbl[] = { pbn_b2_4_115200 }, { PCI_VENDOR_ID_INTASHIELD, 0x0E34, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, - pbn_b2_4_115200 }, - /* - * Brainboxes UC-268 - */ - { PCI_VENDOR_ID_INTASHIELD, 0x0841, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, - pbn_b2_4_115200 }, - /* - * Brainboxes UC-275/279 - */ - { PCI_VENDOR_ID_INTASHIELD, 0x0881, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, - pbn_b2_8_115200 }, - /* - * Brainboxes UC-302 - */ - { PCI_VENDOR_ID_INTASHIELD, 0x08E1, - PCI_ANY_ID, 
PCI_ANY_ID, - 0, 0, - pbn_b2_2_115200 }, - /* - * Brainboxes UC-310 - */ - { PCI_VENDOR_ID_INTASHIELD, 0x08C1, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, - pbn_b2_2_115200 }, - /* - * Brainboxes UC-313 - */ - { PCI_VENDOR_ID_INTASHIELD, 0x08A3, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, - pbn_b2_2_115200 }, - /* - * Brainboxes UC-320/324 - */ - { PCI_VENDOR_ID_INTASHIELD, 0x0A61, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, - pbn_b2_1_115200 }, - /* - * Brainboxes UC-346 - */ - { PCI_VENDOR_ID_INTASHIELD, 0x0B02, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, - pbn_b2_4_115200 }, - /* - * Brainboxes UC-357 - */ - { PCI_VENDOR_ID_INTASHIELD, 0x0A81, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, - pbn_b2_2_115200 }, - { PCI_VENDOR_ID_INTASHIELD, 0x0A83, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, - pbn_b2_2_115200 }, - /* - * Brainboxes UC-368 - */ - { PCI_VENDOR_ID_INTASHIELD, 0x0C41, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, - pbn_b2_4_115200 }, - /* - * Brainboxes UC-420/431 - */ - { PCI_VENDOR_ID_INTASHIELD, 0x0921, - PCI_ANY_ID, PCI_ANY_ID, - 0, 0, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, pbn_b2_4_115200 }, /* * Perle PCI-RAS cards diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index d0d90752f9f3..fef1b9335f60 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -124,8 +124,7 @@ static const struct serial8250_config uart_config[] = { .name = "16C950/954", .fifo_size = 128, .tx_loadsz = 128, - .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01, - .rxtrig_bytes = {16, 32, 112, 120}, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, /* UART_CAP_EFR breaks billionon CF bluetooth card. */ .flags = UART_CAP_FIFO | UART_CAP_SLEEP, }, @@ -275,11 +274,7 @@ configured less than Maximum supported fifo bytes */ /* Uart divisor latch read */ static int default_serial_dl_read(struct uart_8250_port *up) { - /* Assign these in pieces to truncate any bits above 7. 
*/ - unsigned char dll = serial_in(up, UART_DLL); - unsigned char dlm = serial_in(up, UART_DLM); - - return dll | dlm << 8; + return serial_in(up, UART_DLL) | serial_in(up, UART_DLM) << 8; } /* Uart divisor latch write */ @@ -1165,11 +1160,9 @@ static void autoconfig(struct uart_8250_port *up) serial_out(up, UART_LCR, 0); serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); + scratch = serial_in(up, UART_IIR) >> 6; - /* Assign this as it is to truncate any bits above 7. */ - scratch = serial_in(up, UART_IIR); - - switch (scratch >> 6) { + switch (scratch) { case 0: autoconfig_8250(up); break; diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c index 92c64ed12295..1a14948c86d6 100644 --- a/drivers/tty/serial/8250/serial_cs.c +++ b/drivers/tty/serial/8250/serial_cs.c @@ -305,7 +305,6 @@ static int serial_resume(struct pcmcia_device *link) static int serial_probe(struct pcmcia_device *link) { struct serial_info *info; - int ret; dev_dbg(&link->dev, "serial_attach()\n"); @@ -320,15 +319,7 @@ static int serial_probe(struct pcmcia_device *link) if (do_sound) link->config_flags |= CONF_ENABLE_SPKR; - ret = serial_config(link); - if (ret) - goto free_info; - - return 0; - -free_info: - kfree(info); - return ret; + return serial_config(link); } static void serial_detach(struct pcmcia_device *link) @@ -780,7 +771,6 @@ static const struct pcmcia_device_id serial_ids[] = { PCMCIA_DEVICE_PROD_ID12("Multi-Tech", "MT2834LT", 0x5f73be51, 0x4cd7c09e), PCMCIA_DEVICE_PROD_ID12("OEM ", "C288MX ", 0xb572d360, 0xd2385b7a), PCMCIA_DEVICE_PROD_ID12("Option International", "V34bis GSM/PSTN Data/Fax Modem", 0x9d7cd6f5, 0x5cb8bf41), - PCMCIA_DEVICE_PROD_ID12("Option International", "GSM-Ready 56K/ISDN", 0x9d7cd6f5, 0xb23844aa), PCMCIA_DEVICE_PROD_ID12("PCMCIA ", "C336MX ", 0x99bcafe9, 0xaa25bcab), PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "PCMCIA Dual RS-232 Serial Port Card", 0xc4420b35, 0x92abc92f), PCMCIA_DEVICE_PROD_ID12("Quatech Inc", "Dual RS-232 Serial Port 
PC Card", 0xc4420b35, 0x031a380d), diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c index 7f4ba9273966..5d41d5b92619 100644 --- a/drivers/tty/serial/amba-pl010.c +++ b/drivers/tty/serial/amba-pl010.c @@ -465,11 +465,14 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios, if ((termios->c_cflag & CREAD) == 0) uap->port.ignore_status_mask |= UART_DUMMY_RSR_RX; + /* first, disable everything */ old_cr = readb(uap->port.membase + UART010_CR) & ~UART010_CR_MSIE; if (UART_ENABLE_MS(port, termios->c_cflag)) old_cr |= UART010_CR_MSIE; + writel(0, uap->port.membase + UART010_CR); + /* Set baud rate */ quot -= 1; writel((quot & 0xf00) >> 8, uap->port.membase + UART010_LCRM); diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index c4ce3f0b618c..cc99a2b6e3eb 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1944,13 +1944,32 @@ static const char *pl011_type(struct uart_port *port) return uap->port.type == PORT_AMBA ? uap->type : NULL; } +/* + * Release the memory region(s) being used by 'port' + */ +static void pl011_release_port(struct uart_port *port) +{ + release_mem_region(port->mapbase, SZ_4K); +} + +/* + * Request the memory region(s) being used by 'port' + */ +static int pl011_request_port(struct uart_port *port) +{ + return request_mem_region(port->mapbase, SZ_4K, "uart-pl011") + != NULL ? 0 : -EBUSY; +} + /* * Configure/autoconfigure the port. 
*/ static void pl011_config_port(struct uart_port *port, int flags) { - if (flags & UART_CONFIG_TYPE) + if (flags & UART_CONFIG_TYPE) { port->type = PORT_AMBA; + pl011_request_port(port); + } } /* @@ -1965,8 +1984,6 @@ static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser) ret = -EINVAL; if (ser->baud_base < 9600) ret = -EINVAL; - if (port->mapbase != (unsigned long) ser->iomem_base) - ret = -EINVAL; return ret; } @@ -1984,6 +2001,8 @@ static struct uart_ops amba_pl011_pops = { .flush_buffer = pl011_dma_flush_buffer, .set_termios = pl011_set_termios, .type = pl011_type, + .release_port = pl011_release_port, + .request_port = pl011_request_port, .config_port = pl011_config_port, .verify_port = pl011_verify_port, #ifdef CONFIG_CONSOLE_POLL @@ -2013,6 +2032,8 @@ static const struct uart_ops sbsa_uart_pops = { .shutdown = sbsa_uart_shutdown, .set_termios = sbsa_uart_set_termios, .type = pl011_type, + .release_port = pl011_release_port, + .request_port = pl011_request_port, .config_port = pl011_config_port, .verify_port = pl011_verify_port, #ifdef CONFIG_CONSOLE_POLL @@ -2471,7 +2492,6 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match); static const struct acpi_device_id sbsa_uart_acpi_match[] = { { "ARMH0011", 0 }, - { "ARMHB000", 0 }, {}, }; MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index e49493703179..3bd19de7df71 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -928,13 +928,6 @@ static void atmel_tx_dma(struct uart_port *port) desc->callback = atmel_complete_tx_dma; desc->callback_param = atmel_port; atmel_port->cookie_tx = dmaengine_submit(desc); - if (dma_submit_error(atmel_port->cookie_tx)) { - dev_err(port->dev, "dma_submit_error %d\n", - atmel_port->cookie_tx); - return; - } - - dma_async_issue_pending(chan); } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) @@ -1193,13 +1186,6 @@ static int 
atmel_prepare_rx_dma(struct uart_port *port) desc->callback_param = port; atmel_port->desc_rx = desc; atmel_port->cookie_rx = dmaengine_submit(desc); - if (dma_submit_error(atmel_port->cookie_rx)) { - dev_err(port->dev, "dma_submit_error %d\n", - atmel_port->cookie_rx); - goto chan_err; - } - - dma_async_issue_pending(atmel_port->chan_rx); return 0; diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 1319f3dd5b70..1544a7cc76ff 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -1681,9 +1681,6 @@ lpuart32_console_get_options(struct lpuart_port *sport, int *baud, bd = lpuart32_read(sport->port.membase + UARTBAUD); bd &= UARTBAUD_SBR_MASK; - if (!bd) - return; - sbr = bd; uartclk = clk_get_rate(sport->clk); /* diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c index 4ed0c099c757..932b2accd06f 100644 --- a/drivers/tty/serial/jsm/jsm_neo.c +++ b/drivers/tty/serial/jsm/jsm_neo.c @@ -827,9 +827,7 @@ static inline void neo_parse_isr(struct jsm_board *brd, u32 port) /* Parse any modem signal changes */ jsm_dbg(INTR, &ch->ch_bd->pci_dev, "MOD_STAT: sending to parse_modem_sigs\n"); - spin_lock_irqsave(&ch->uart_port.lock, lock_flags); neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr)); - spin_unlock_irqrestore(&ch->uart_port.lock, lock_flags); } } diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c index dad3abab8280..524e86ab3cae 100644 --- a/drivers/tty/serial/jsm/jsm_tty.c +++ b/drivers/tty/serial/jsm/jsm_tty.c @@ -195,7 +195,6 @@ static void jsm_tty_break(struct uart_port *port, int break_state) static int jsm_tty_open(struct uart_port *port) { - unsigned long lock_flags; struct jsm_board *brd; struct jsm_channel *channel = container_of(port, struct jsm_channel, uart_port); @@ -249,7 +248,6 @@ static int jsm_tty_open(struct uart_port *port) channel->ch_cached_lsr = 0; channel->ch_stops_sent = 0; - spin_lock_irqsave(&port->lock, lock_flags); termios 
= &port->state->port.tty->termios; channel->ch_c_cflag = termios->c_cflag; channel->ch_c_iflag = termios->c_iflag; @@ -269,7 +267,6 @@ static int jsm_tty_open(struct uart_port *port) jsm_carrier(channel); channel->ch_open_count++; - spin_unlock_irqrestore(&port->lock, lock_flags); jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n"); return 0; diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 5b1e2a16dccc..cfb6ecdbb99f 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -615,9 +615,6 @@ static void msm_start_rx_dma(struct msm_port *msm_port) u32 val; int ret; - if (IS_ENABLED(CONFIG_CONSOLE_POLL)) - return; - if (!dma->chan) return; diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c index b7d1b1645c84..056f91b3a4ca 100644 --- a/drivers/tty/serial/rp2.c +++ b/drivers/tty/serial/rp2.c @@ -198,6 +198,7 @@ struct rp2_card { void __iomem *bar0; void __iomem *bar1; spinlock_t card_lock; + struct completion fw_loaded; }; #define RP_ID(prod) PCI_VDEVICE(RP, (prod)) @@ -666,10 +667,17 @@ static void rp2_remove_ports(struct rp2_card *card) card->initialized_ports = 0; } -static int rp2_load_firmware(struct rp2_card *card, const struct firmware *fw) +static void rp2_fw_cb(const struct firmware *fw, void *context) { + struct rp2_card *card = context; resource_size_t phys_base; - int i, rc = 0; + int i, rc = -ENOENT; + + if (!fw) { + dev_err(&card->pdev->dev, "cannot find '%s' firmware image\n", + RP2_FW_NAME); + goto no_fw; + } phys_base = pci_resource_start(card->pdev, 1); @@ -715,13 +723,23 @@ static int rp2_load_firmware(struct rp2_card *card, const struct firmware *fw) card->initialized_ports++; } - return rc; + release_firmware(fw); +no_fw: + /* + * rp2_fw_cb() is called from a workqueue long after rp2_probe() + * has already returned success. So if something failed here, + * we'll just leave the now-dormant device in place until somebody + * unbinds it. 
+ */ + if (rc) + dev_warn(&card->pdev->dev, "driver initialization failed\n"); + + complete(&card->fw_loaded); } static int rp2_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - const struct firmware *fw; struct rp2_card *card; struct rp2_uart_port *ports; void __iomem * const *bars; @@ -732,6 +750,7 @@ static int rp2_probe(struct pci_dev *pdev, return -ENOMEM; pci_set_drvdata(pdev, card); spin_lock_init(&card->card_lock); + init_completion(&card->fw_loaded); rc = pcim_enable_device(pdev); if (rc) @@ -764,24 +783,22 @@ static int rp2_probe(struct pci_dev *pdev, return -ENOMEM; card->ports = ports; - rc = request_firmware(&fw, RP2_FW_NAME, &pdev->dev); - if (rc < 0) { - dev_err(&pdev->dev, "cannot find '%s' firmware image\n", - RP2_FW_NAME); - return rc; - } - - rc = rp2_load_firmware(card, fw); - - release_firmware(fw); - if (rc < 0) - return rc; - rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt, IRQF_SHARED, DRV_NAME, card); if (rc) return rc; + /* + * Only catastrophic errors (e.g. ENOMEM) are reported here. + * If the FW image is missing, we'll find out in rp2_fw_cb() + * and print an error message. 
+ */ + rc = request_firmware_nowait(THIS_MODULE, 1, RP2_FW_NAME, &pdev->dev, + GFP_KERNEL, card, rp2_fw_cb); + if (rc) + return rc; + dev_dbg(&pdev->dev, "waiting for firmware blob...\n"); + return 0; } @@ -789,6 +806,7 @@ static void rp2_remove(struct pci_dev *pdev) { struct rp2_card *card = pci_get_drvdata(pdev); + wait_for_completion(&card->fw_loaded); rp2_remove_ports(card); } diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index d8dffcb6f5be..99122059c750 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2250,8 +2250,7 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, * We probably don't need a spinlock around this, but */ spin_lock_irqsave(&port->lock, flags); - port->mctrl &= TIOCM_DTR; - port->ops->set_mctrl(port, port->mctrl); + port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR); spin_unlock_irqrestore(&port->lock, flags); /* diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 83d3645ac2ef..f89d1f79be18 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -279,7 +279,7 @@ static void stm32_start_tx(struct uart_port *port) { struct circ_buf *xmit = &port->state->xmit; - if (uart_circ_empty(xmit) && !port->x_char) + if (uart_circ_empty(xmit)) return; stm32_set_bits(port, USART_CR1, USART_CR1_TXEIE | USART_CR1_TE); diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index 832aec1f145f..4706df20191b 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -519,9 +519,6 @@ static void flush_to_ldisc(struct work_struct *work) if (!count) break; head->read += count; - - if (need_resched()) - cond_resched(); } mutex_unlock(&buf->lock); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index c896b4e98aaa..b7effcfee91d 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -537,8 +537,8 @@ static void __proc_set_tty(struct tty_struct *tty) 
put_pid(tty->session); put_pid(tty->pgrp); tty->pgrp = get_pid(task_pgrp(current)); - tty->session = get_pid(task_session(current)); spin_unlock_irqrestore(&tty->ctrl_lock, flags); + tty->session = get_pid(task_session(current)); if (current->signal->tty) { tty_debug(tty, "current tty %s not NULL!!\n", current->signal->tty->name); @@ -929,24 +929,21 @@ void disassociate_ctty(int on_exit) spin_lock_irq(¤t->sighand->siglock); put_pid(current->signal->tty_old_pgrp); current->signal->tty_old_pgrp = NULL; - tty = tty_kref_get(current->signal->tty); - spin_unlock_irq(¤t->sighand->siglock); + tty = tty_kref_get(current->signal->tty); if (tty) { unsigned long flags; - - tty_lock(tty); spin_lock_irqsave(&tty->ctrl_lock, flags); put_pid(tty->session); put_pid(tty->pgrp); tty->session = NULL; tty->pgrp = NULL; spin_unlock_irqrestore(&tty->ctrl_lock, flags); - tty_unlock(tty); tty_kref_put(tty); } else tty_debug_hangup(tty, "no current tty\n"); + spin_unlock_irq(¤t->sighand->siglock); /* Now clear signal->tty under the lock */ read_lock(&tasklist_lock); session_clear_tty(task_session(current)); @@ -2287,6 +2284,8 @@ static int tty_fasync(int fd, struct file *filp, int on) * Locking: * Called functions take tty_ldiscs_lock * current->signal->tty check is safe without locks + * + * FIXME: may race normal receive processing */ static int tiocsti(struct tty_struct *tty, char __user *p) @@ -2300,10 +2299,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p) return -EFAULT; tty_audit_tiocsti(tty, ch); ld = tty_ldisc_ref_wait(tty); - tty_buffer_lock_exclusive(tty->port); if (ld->ops->receive_buf) ld->ops->receive_buf(tty, &ch, &mbz, 1); - tty_buffer_unlock_exclusive(tty->port); tty_ldisc_deref(ld); return 0; } @@ -2604,19 +2601,14 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t return -ENOTTY; if (retval) return retval; - + if (!current->signal->tty || + (current->signal->tty != real_tty) || + (real_tty->session != task_session(current))) 
+ return -ENOTTY; if (get_user(pgrp_nr, p)) return -EFAULT; if (pgrp_nr < 0) return -EINVAL; - - spin_lock_irq(&real_tty->ctrl_lock); - if (!current->signal->tty || - (current->signal->tty != real_tty) || - (real_tty->session != task_session(current))) { - retval = -ENOTTY; - goto out_unlock_ctrl; - } rcu_read_lock(); pgrp = find_vpid(pgrp_nr); retval = -ESRCH; @@ -2626,12 +2618,12 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t if (session_of_pgrp(pgrp) != task_session(current)) goto out_unlock; retval = 0; + spin_lock_irq(&tty->ctrl_lock); put_pid(real_tty->pgrp); real_tty->pgrp = get_pid(pgrp); + spin_unlock_irq(&tty->ctrl_lock); out_unlock: rcu_read_unlock(); -out_unlock_ctrl: - spin_unlock_irq(&real_tty->ctrl_lock); return retval; } @@ -2643,31 +2635,21 @@ out_unlock_ctrl: * * Obtain the session id of the tty. If there is no session * return an error. + * + * Locking: none. Reference to current->signal->tty is safe. */ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) { - unsigned long flags; - pid_t sid; - /* * (tty == real_tty) is a cheap way of * testing if the tty is NOT a master pty. */ if (tty == real_tty && current->signal->tty != real_tty) return -ENOTTY; - - spin_lock_irqsave(&real_tty->ctrl_lock, flags); if (!real_tty->session) - goto err; - sid = pid_vnr(real_tty->session); - spin_unlock_irqrestore(&real_tty->ctrl_lock, flags); - - return put_user(sid, p); - -err: - spin_unlock_irqrestore(&real_tty->ctrl_lock, flags); - return -ENOTTY; + return -ENOTTY; + return put_user(pid_vnr(real_tty->session), p); } /** @@ -2762,14 +2744,14 @@ out: * @p: pointer to result * * Obtain the modem status bits from the tty driver if the feature - * is supported. Return -ENOTTY if it is not available. + * is supported. Return -EINVAL if it is not available. 
* * Locking: none (up to the driver) */ static int tty_tiocmget(struct tty_struct *tty, int __user *p) { - int retval = -ENOTTY; + int retval = -EINVAL; if (tty->ops->tiocmget) { retval = tty->ops->tiocmget(tty); @@ -2787,7 +2769,7 @@ static int tty_tiocmget(struct tty_struct *tty, int __user *p) * @p: pointer to desired bits * * Set the modem status bits from the tty driver if the feature - * is supported. Return -ENOTTY if it is not available. + * is supported. Return -EINVAL if it is not available. * * Locking: none (up to the driver) */ @@ -2799,7 +2781,7 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd, unsigned int set, clear, val; if (tty->ops->tiocmset == NULL) - return -ENOTTY; + return -EINVAL; retval = get_user(val, p); if (retval) @@ -3079,14 +3061,10 @@ void __do_SAK(struct tty_struct *tty) struct task_struct *g, *p; struct pid *session; int i; - unsigned long flags; if (!tty) return; - - spin_lock_irqsave(&tty->ctrl_lock, flags); - session = get_pid(tty->session); - spin_unlock_irqrestore(&tty->ctrl_lock, flags); + session = tty->session; tty_ldisc_flush(tty); @@ -3122,7 +3100,6 @@ void __do_SAK(struct tty_struct *tty) task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); - put_pid(session); #endif } diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c index e8301dcf4c84..c8c91f0476a2 100644 --- a/drivers/tty/vt/consolemap.c +++ b/drivers/tty/vt/consolemap.c @@ -494,7 +494,7 @@ con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos) p2[unicode & 0x3f] = fontpos; - p->sum += (fontpos << 20U) + unicode; + p->sum += (fontpos << 20) + unicode; return 0; } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 0fab196a1d90..7ec5e6dd60e5 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -826,7 +826,7 @@ static inline int resize_screen(struct vc_data *vc, int width, int height, /* Resizes the resolution of the display adapater */ int err = 0; - if 
(vc->vc_sw->con_resize) + if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_resize) err = vc->vc_sw->con_resize(vc, width, height, user); return err; @@ -882,25 +882,8 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, new_row_size = new_cols << 1; new_screen_size = new_row_size * new_rows; - if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) { - /* - * This function is being called here to cover the case - * where the userspace calls the FBIOPUT_VSCREENINFO twice, - * passing the same fb_var_screeninfo containing the fields - * yres/xres equal to a number non-multiple of vc_font.height - * and yres_virtual/xres_virtual equal to number lesser than the - * vc_font.height and yres/xres. - * In the second call, the struct fb_var_screeninfo isn't - * being modified by the underlying driver because of the - * if above, and this causes the fbcon_display->vrows to become - * negative and it eventually leads to out-of-bound - * access by the imageblit function. - * To give the correct values to the struct and to not have - * to deal with possible errors from the code below, we call - * the resize_screen here as well. 
- */ - return resize_screen(vc, new_cols, new_rows, user); - } + if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) + return 0; if (new_screen_size > (4 << 20)) return -EINVAL; diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 295b20d8692c..b111071d19b5 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -487,19 +487,16 @@ int vt_ioctl(struct tty_struct *tty, ret = -EINVAL; goto out; } - console_lock(); - if (vc->vc_mode == (unsigned char) arg) { - console_unlock(); + /* FIXME: this needs the console lock extending */ + if (vc->vc_mode == (unsigned char) arg) break; - } vc->vc_mode = (unsigned char) arg; - if (console != fg_console) { - console_unlock(); + if (console != fg_console) break; - } /* * explicitly blank/unblank the screen if switching modes */ + console_lock(); if (arg == KD_TEXT) do_unblank_screen(1); else @@ -694,7 +691,6 @@ int vt_ioctl(struct tty_struct *tty, ret = -ENXIO; else { arg--; - arg = array_index_nospec(arg, MAX_NR_CONSOLES); console_lock(); ret = vc_allocate(arg); console_unlock(); @@ -719,9 +715,9 @@ int vt_ioctl(struct tty_struct *tty, if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) ret = -ENXIO; else { - vsa.console--; vsa.console = array_index_nospec(vsa.console, - MAX_NR_CONSOLES); + MAX_NR_CONSOLES + 1); + vsa.console--; console_lock(); ret = vc_allocate(vsa.console); if (ret == 0) { @@ -902,17 +898,17 @@ int vt_ioctl(struct tty_struct *tty, if (vcp) { int ret; int save_scan_lines = vcp->vc_scan_lines; - int save_cell_height = vcp->vc_cell_height; + int save_font_height = vcp->vc_font.height; if (v.v_vlin) vcp->vc_scan_lines = v.v_vlin; if (v.v_clin) - vcp->vc_cell_height = v.v_clin; + vcp->vc_font.height = v.v_clin; vcp->vc_resize_user = 1; ret = vc_resize(vcp, v.v_cols, v.v_rows); if (ret) { vcp->vc_scan_lines = save_scan_lines; - vcp->vc_cell_height = save_cell_height; + vcp->vc_font.height = save_font_height; console_unlock(); return ret; } diff --git 
a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 8aced1a4a48b..2949289bb3c5 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -58,8 +58,7 @@ static const struct ci_hdrc_imx_platform_flag imx6sx_usb_data = { static const struct ci_hdrc_imx_platform_flag imx6ul_usb_data = { .flags = CI_HDRC_SUPPORTS_RUNTIME_PM | - CI_HDRC_TURN_VBUS_EARLY_ON | - CI_HDRC_DISABLE_DEVICE_STREAMING, + CI_HDRC_TURN_VBUS_EARLY_ON, }; static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = { @@ -127,13 +126,9 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev) misc_pdev = of_find_device_by_node(args.np); of_node_put(args.np); - if (!misc_pdev) + if (!misc_pdev || !platform_get_drvdata(misc_pdev)) return ERR_PTR(-EPROBE_DEFER); - if (!platform_get_drvdata(misc_pdev)) { - put_device(&misc_pdev->dev); - return ERR_PTR(-EPROBE_DEFER); - } data->dev = &misc_pdev->dev; if (of_find_property(np, "disable-over-current", NULL)) diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 9b3260a7cd2c..e104c99b3a1f 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -518,7 +518,7 @@ int hw_device_reset(struct ci_hdrc *ci) return 0; } -static irqreturn_t ci_irq_handler(int irq, void *data) +static irqreturn_t ci_irq(int irq, void *data) { struct ci_hdrc *ci = data; irqreturn_t ret = IRQ_NONE; @@ -571,15 +571,6 @@ static irqreturn_t ci_irq_handler(int irq, void *data) return ret; } -static void ci_irq(struct ci_hdrc *ci) -{ - unsigned long flags; - - local_irq_save(flags); - ci_irq_handler(ci->irq, ci); - local_irq_restore(flags); -} - static int ci_vbus_notifier(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -593,7 +584,7 @@ static int ci_vbus_notifier(struct notifier_block *nb, unsigned long event, vbus->changed = true; - ci_irq(ci); + ci_irq(ci->irq, ci); return NOTIFY_DONE; } @@ -610,7 +601,7 @@ static int ci_id_notifier(struct 
notifier_block *nb, unsigned long event, id->changed = true; - ci_irq(ci); + ci_irq(ci->irq, ci); return NOTIFY_DONE; } @@ -1032,7 +1023,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, ci); - ret = devm_request_irq(dev, ci->irq, ci_irq_handler, IRQF_SHARED, + ret = devm_request_irq(dev, ci->irq, ci_irq, IRQF_SHARED, ci->platdata->name, ci); if (ret) goto stop; @@ -1147,11 +1138,11 @@ static void ci_extcon_wakeup_int(struct ci_hdrc *ci) if (!IS_ERR(cable_id->edev) && ci->is_otg && (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) - ci_irq(ci); + ci_irq(ci->irq, ci); if (!IS_ERR(cable_vbus->edev) && ci->is_otg && (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) - ci_irq(ci); + ci_irq(ci->irq, ci); } static int ci_controller_resume(struct device *dev) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index d869f37b1d23..a74a5c0eba6e 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -334,10 +334,8 @@ static void acm_ctrl_irq(struct urb *urb) acm->iocount.dsr++; if (difference & ACM_CTRL_DCD) acm->iocount.dcd++; - if (newctrl & ACM_CTRL_BRK) { + if (newctrl & ACM_CTRL_BRK) acm->iocount.brk++; - tty_insert_flip_char(&acm->port, 0, TTY_BREAK); - } if (newctrl & ACM_CTRL_RI) acm->iocount.rng++; if (newctrl & ACM_CTRL_FRAMING) @@ -348,9 +346,6 @@ static void acm_ctrl_irq(struct urb *urb) acm->iocount.overrun++; spin_unlock(&acm->read_lock); - if (newctrl & ACM_CTRL_BRK) - tty_flip_buffer_push(&acm->port); - if (difference) wake_up_all(&acm->wioctl); @@ -410,16 +405,11 @@ static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags) static void acm_process_read_urb(struct acm *acm, struct urb *urb) { - unsigned long flags; - if (!urb->actual_length) return; - spin_lock_irqsave(&acm->read_lock, flags); tty_insert_flip_string(&acm->port, urb->transfer_buffer, urb->actual_length); - spin_unlock_irqrestore(&acm->read_lock, flags); - tty_flip_buffer_push(&acm->port); } @@ -558,8 +548,7 @@ 
static void acm_port_dtr_rts(struct tty_port *port, int raise) res = acm_set_control(acm, val); if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE)) - /* This is broken in too many devices to spam the logs */ - dev_dbg(&acm->control->dev, "failed to set dtr/rts\n"); + dev_err(&acm->control->dev, "failed to set dtr/rts\n"); } static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) @@ -891,6 +880,8 @@ static int set_serial_info(struct acm *acm, if ((new_serial.close_delay != old_close_delay) || (new_serial.closing_wait != old_closing_wait)) retval = -EPERM; + else + retval = -EOPNOTSUPP; } else { acm->port.close_delay = close_delay; acm->port.closing_wait = closing_wait; @@ -1508,11 +1499,6 @@ skip_countries: return 0; alloc_fail8: - if (!acm->combined_interfaces) { - /* Clear driver data so that disconnect() returns early. */ - usb_set_intfdata(data_interface, NULL); - usb_driver_release_interface(&acm_driver, data_interface); - } if (acm->country_codes) { device_remove_file(&acm->control->dev, &dev_attr_wCountryCodes); @@ -1908,10 +1894,6 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x04d8, 0x0083), /* Bootloader mode */ .driver_info = IGNORE_DEVICE, }, - - { USB_DEVICE(0x04d8, 0xf58b), - .driver_info = IGNORE_DEVICE, - }, #endif /*Samsung phone in firmware update mode */ @@ -1942,16 +1924,6 @@ static const struct usb_device_id acm_ids[] = { .driver_info = SEND_ZERO_PACKET, }, - /* Exclude Goodix Fingerprint Reader */ - { USB_DEVICE(0x27c6, 0x5395), - .driver_info = IGNORE_DEVICE, - }, - - /* Exclude Heimann Sensor GmbH USB appset demo */ - { USB_DEVICE(0x32a7, 0x0000), - .driver_info = IGNORE_DEVICE, - }, - /* control interfaces without any protocol set */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_PROTO_NONE) }, diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 582099f4f449..71c2ae4b8106 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -289,25 +289,8 @@ 
static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i #define usblp_reset(usblp)\ usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0) -static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel) -{ - u8 *buf; - int ret; - - buf = kzalloc(1, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, - USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, - channel, buf, 1); - if (ret == 0) - *new_channel = buf[0]; - - kfree(buf); - - return ret; -} +#define usblp_hp_channel_change_request(usblp, channel, buffer) \ + usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1) /* * See the description for usblp_select_alts() below for the usage @@ -1349,17 +1332,14 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol) if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL) return -EINVAL; - /* Don't unnecessarily set the interface if there's a single alt. 
*/ - if (usblp->intf->num_altsetting > 1) { - alts = usblp->protocol[protocol].alt_setting; - if (alts < 0) - return -EINVAL; - r = usb_set_interface(usblp->dev, usblp->ifnum, alts); - if (r < 0) { - printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", - alts, usblp->ifnum); - return r; - } + alts = usblp->protocol[protocol].alt_setting; + if (alts < 0) + return -EINVAL; + r = usb_set_interface(usblp->dev, usblp->ifnum, alts); + if (r < 0) { + printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", + alts, usblp->ifnum); + return r; } usblp->bidir = (usblp->protocol[protocol].epread != NULL); diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index b4c8d52414ae..426e40f1232f 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -375,7 +375,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0 * (see the end of section 5.6.3), so don't warn about them. */ - maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize); + maxp = usb_endpoint_maxp(&endpoint->desc); if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) { dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n", cfgno, inum, asnum, d->bEndpointAddress); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 758ce722d775..4740c307e02d 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -731,7 +731,6 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd) { struct urb *urb; int length; - int status; unsigned long flags; char buffer[6]; /* Any root hubs with > 31 ports? 
*/ @@ -749,17 +748,11 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd) if (urb) { clear_bit(HCD_FLAG_POLL_PENDING, &hcd->flags); hcd->status_urb = NULL; - if (urb->transfer_buffer_length >= length) { - status = 0; - } else { - status = -EOVERFLOW; - length = urb->transfer_buffer_length; - } urb->actual_length = length; memcpy(urb->transfer_buffer, buffer, length); usb_hcd_unlink_urb_from_ep(hcd, urb); - usb_hcd_giveback_urb(hcd, urb, status); + usb_hcd_giveback_urb(hcd, urb, 0); } else { length = 0; set_bit(HCD_FLAG_POLL_PENDING, &hcd->flags); @@ -1642,13 +1635,6 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags) urb->hcpriv = NULL; INIT_LIST_HEAD(&urb->urb_list); atomic_dec(&urb->use_count); - /* - * Order the write of urb->use_count above before the read - * of urb->reject below. Pairs with the memory barriers in - * usb_kill_urb() and usb_poison_urb(). - */ - smp_mb__after_atomic(); - atomic_dec(&urb->dev->urbnum); if (atomic_read(&urb->reject)) wake_up(&usb_kill_urb_queue); @@ -1758,13 +1744,6 @@ static void __usb_hcd_giveback_urb(struct urb *urb) usb_anchor_resume_wakeups(anchor); atomic_dec(&urb->use_count); - /* - * Order the write of urb->use_count above before the read - * of urb->reject below. Pairs with the memory barriers in - * usb_kill_urb() and usb_poison_urb(). 
- */ - smp_mb__after_atomic(); - if (unlikely(atomic_read(&urb->reject))) wake_up(&usb_kill_urb_queue); usb_put_urb(urb); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 22d9ef88f5e2..6253f2eb95e2 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1067,10 +1067,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) } else { hub_power_on(hub, true); } - /* Give some time on remote wakeup to let links to transit to U0 */ - } else if (hub_is_superspeed(hub->hdev)) - msleep(20); - + } init2: /* @@ -1185,7 +1182,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) */ if (portchange || (hub_is_superspeed(hub->hdev) && port_resumed)) - set_bit(port1, hub->event_bits); + set_bit(port1, hub->change_bits); } else if (udev->persist_enabled) { #ifdef CONFIG_PM @@ -3448,6 +3445,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) * sequence. */ status = hub_port_status(hub, port1, &portstatus, &portchange); + + /* TRSMRCY = 10 msec */ + usleep_range(10000, 10500); } SuspendCleared: @@ -3462,9 +3462,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_SUSPEND); } - - /* TRSMRCY = 10 msec */ - usleep_range(10000, 10500); } if (udev->persist_enabled && hub_is_superspeed(hub->hdev)) @@ -3854,47 +3851,6 @@ static int usb_set_lpm_timeout(struct usb_device *udev, return 0; } -/* - * Don't allow device intiated U1/U2 if the system exit latency + one bus - * interval is greater than the minimum service interval of any active - * periodic endpoint. 
See USB 3.2 section 9.4.9 - */ -static bool usb_device_may_initiate_lpm(struct usb_device *udev, - enum usb3_link_state state) -{ - unsigned int sel; /* us */ - int i, j; - - if (state == USB3_LPM_U1) - sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); - else if (state == USB3_LPM_U2) - sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); - else - return false; - - for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { - struct usb_interface *intf; - struct usb_endpoint_descriptor *desc; - unsigned int interval; - - intf = udev->actconfig->interface[i]; - if (!intf) - continue; - - for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++) { - desc = &intf->cur_altsetting->endpoint[j].desc; - - if (usb_endpoint_xfer_int(desc) || - usb_endpoint_xfer_isoc(desc)) { - interval = (1 << (desc->bInterval - 1)) * 125; - if (sel + 125 > interval) - return false; - } - } - } - return true; -} - /* * Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated * U1/U2 entry. @@ -3967,23 +3923,20 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, * U1/U2_ENABLE */ if (udev->actconfig && - usb_device_may_initiate_lpm(udev, state)) { - if (usb_set_device_initiated_lpm(udev, state, true)) { - /* - * Request to enable device initiated U1/U2 failed, - * better to turn off lpm in this case. - */ - usb_set_lpm_timeout(udev, state, 0); - hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); - return; - } + usb_set_device_initiated_lpm(udev, state, true) == 0) { + if (state == USB3_LPM_U1) + udev->usb3_lpm_u1_enabled = 1; + else if (state == USB3_LPM_U2) + udev->usb3_lpm_u2_enabled = 1; + } else { + /* Don't request U1/U2 entry if the device + * cannot transition to U1/U2. 
+ */ + usb_set_lpm_timeout(udev, state, 0); + hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); } - - if (state == USB3_LPM_U1) - udev->usb3_lpm_u1_enabled = 1; - else if (state == USB3_LPM_U2) - udev->usb3_lpm_u2_enabled = 1; } + /* * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated * U1/U2 entry. @@ -4413,6 +4366,8 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, if (oldspeed == USB_SPEED_LOW) delay = HUB_LONG_RESET_TIME; + mutex_lock(hcd->address0_mutex); + /* Reset the device; full speed may morph to high speed */ /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ retval = hub_port_reset(hub, port1, udev, delay, false); @@ -4703,6 +4658,7 @@ fail: hub_port_disable(hub, port1, 0); update_devnum(udev, devnum); /* for disconnect processing */ } + mutex_unlock(hcd->address0_mutex); return retval; } @@ -4792,7 +4748,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, struct usb_port *port_dev = hub->ports[port1 - 1]; struct usb_device *udev = port_dev->child; static int unreliable_port = -1; - bool retry_locked; /* Disconnect any existing devices under this port */ if (udev) { @@ -4853,11 +4808,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, unit_load = 100; status = 0; - for (i = 0; i < SET_CONFIG_TRIES; i++) { - usb_lock_port(port_dev); - mutex_lock(hcd->address0_mutex); - retry_locked = true; /* reallocate for each attempt, since references * to the previous one can escape in various ways @@ -4866,8 +4817,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, if (!udev) { dev_err(&port_dev->dev, "couldn't allocate usb_device\n"); - mutex_unlock(hcd->address0_mutex); - usb_unlock_port(port_dev); goto done; } @@ -4889,14 +4838,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, } /* reset (non-USB 3.0 devices) and get descriptor */ + usb_lock_port(port_dev); status = 
hub_port_init(hub, udev, port1, i); + usb_unlock_port(port_dev); if (status < 0) goto loop; - mutex_unlock(hcd->address0_mutex); - usb_unlock_port(port_dev); - retry_locked = false; - if (udev->quirks & USB_QUIRK_DELAY_INIT) msleep(2000); @@ -4989,10 +4936,6 @@ loop: usb_ep0_reinit(udev); release_devnum(udev); hub_free_dev(udev); - if (retry_locked) { - mutex_unlock(hcd->address0_mutex); - usb_unlock_port(port_dev); - } usb_put_dev(udev); if ((status == -ENOTCONN) || (status == -ENOTSUPP)) break; @@ -5545,8 +5488,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev) bos = udev->bos; udev->bos = NULL; - mutex_lock(hcd->address0_mutex); - for (i = 0; i < SET_CONFIG_TRIES; ++i) { /* ep0 maxpacket size may change; let the HCD know about it. @@ -5556,7 +5497,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev) if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV) break; } - mutex_unlock(hcd->address0_mutex); if (ret < 0) goto re_enumerate; diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h index f824c7f78ae5..688817fb3246 100644 --- a/drivers/usb/core/hub.h +++ b/drivers/usb/core/hub.h @@ -140,10 +140,8 @@ static inline unsigned hub_power_on_good_delay(struct usb_hub *hub) { unsigned delay = hub->descriptor->bPwrOn2PwrGood * 2; - if (!hub->hdev->parent) /* root hub */ - return delay; - else /* Wait at least 100 msec for power to become stable */ - return max(delay, 100U); + /* Wait at least 100 msec for power to become stable */ + return max(delay, 100U); } static inline int hub_port_debounce_be_connected(struct usb_hub *hub, diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 2ca6ed207e26..34d8cece6dd3 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -189,9 +189,6 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x06a3, 0x0006), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, - /* Agfa SNAPSCAN 1212U */ - { USB_DEVICE(0x06bd, 0x0001), .driver_info = 
USB_QUIRK_RESET_RESUME }, - /* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */ { USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -230,9 +227,6 @@ static const struct usb_device_id usb_quirk_list[] = { /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, - /* ELMO L-12F document camera */ - { USB_DEVICE(0x09a1, 0x0028), .driver_info = USB_QUIRK_DELAY_CTRL_MSG }, - /* Broadcom BCM92035DGROM BT dongle */ { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -245,7 +239,6 @@ static const struct usb_device_id usb_quirk_list[] = { /* Realtek hub in Dell WD19 (Type-C) */ { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM }, - { USB_DEVICE(0x0bda, 0x5487), .driver_info = USB_QUIRK_RESET_RESUME }, /* Generic RTL8153 based ethernet adapters */ { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM }, @@ -265,9 +258,6 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1532, 0x0116), .driver_info = USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, - /* Lenovo ThinkPad USB-C Dock Gen2 Ethernet (RTL8153 GigE) */ - { USB_DEVICE(0x17ef, 0xa387), .driver_info = USB_QUIRK_NO_LPM }, - /* BUILDWIN Photo Frame */ { USB_DEVICE(0x1908, 0x1315), .driver_info = USB_QUIRK_HONOR_BNUMINTERFACES }, diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 693f352b8e07..8c4bfd42f785 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -686,12 +686,6 @@ void usb_kill_urb(struct urb *urb) if (!(urb && urb->dev && urb->ep)) return; atomic_inc(&urb->reject); - /* - * Order the write of urb->reject above before the read - * of urb->use_count below. Pairs with the barriers in - * __usb_hcd_giveback_urb() and usb_hcd_submit_urb(). 
- */ - smp_mb__after_atomic(); usb_hcd_unlink_urb(urb, -ENOENT); wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0); @@ -733,12 +727,6 @@ void usb_poison_urb(struct urb *urb) if (!urb) return; atomic_inc(&urb->reject); - /* - * Order the write of urb->reject above before the read - * of urb->use_count below. Pairs with the barriers in - * __usb_hcd_giveback_urb() and usb_hcd_submit_urb(). - */ - smp_mb__after_atomic(); if (!urb->dev || !urb->ep) return; diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index ecc4d09ea704..a899d47c2a7c 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -144,7 +144,6 @@ struct dwc2_hsotg_req; * @lock: State lock to protect contents of endpoint. * @dir_in: Set to true if this endpoint is of the IN direction, which * means that it is sending data to the Host. - * @map_dir: Set to the value of dir_in when the DMA buffer is mapped. * @index: The index for the endpoint registers. * @mc: Multi Count - number of transactions per microframe * @interval - Interval for periodic endpoints @@ -186,7 +185,6 @@ struct dwc2_hsotg_ep { unsigned short fifo_index; unsigned char dir_in; - unsigned char map_dir; unsigned char index; unsigned char mc; u16 interval; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index bee05975eef5..e5ad717cba22 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -289,7 +289,7 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg, if (hs_req->req.length == 0) return; - usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir); + usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in); } /** @@ -707,7 +707,6 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg, if (hs_req->req.length == 0) return 0; - hs_ep->map_dir = hs_ep->dir_in; ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in); if (ret) goto dma_error; @@ -872,6 +871,7 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep, static struct 
dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, u32 windex) { + struct dwc2_hsotg_ep *ep; int dir = (windex & USB_DIR_IN) ? 1 : 0; int idx = windex & 0x7F; @@ -881,7 +881,12 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, if (idx > hsotg->num_of_eps) return NULL; - return index_to_ep(hsotg, idx, dir); + ep = index_to_ep(hsotg, idx, dir); + + if (idx && ep->dir_in != dir) + return NULL; + + return ep; } /** @@ -3688,7 +3693,7 @@ int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg) hsotg->gadget.speed = USB_SPEED_UNKNOWN; spin_unlock_irqrestore(&hsotg->lock, flags); - for (ep = 1; ep < hsotg->num_of_eps; ep++) { + for (ep = 0; ep < hsotg->num_of_eps; ep++) { if (hsotg->eps_in[ep]) dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep); if (hsotg->eps_out[ep]) diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c index 9c030e0033fe..51866f3f2052 100644 --- a/drivers/usb/dwc2/hcd_intr.c +++ b/drivers/usb/dwc2/hcd_intr.c @@ -461,7 +461,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg, &short_read); if (urb->actual_length + xfer_length > urb->length) { - dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__); + dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__); xfer_length = urb->length - urb->actual_length; } @@ -1915,18 +1915,6 @@ error: qtd->error_count++; dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd, DWC2_HC_XFER_XACT_ERR); - /* - * We can get here after a completed transaction - * (urb->actual_length >= urb->length) which was not reported - * as completed. If that is the case, and we do not abort - * the transfer, a transfer of size 0 will be enqueued - * subsequently. If urb->actual_length is not DMA-aligned, - * the buffer will then point to an unaligned address, and - * the resulting behavior is undefined. Bail out in that - * situation. 
- */ - if (qtd->urb->actual_length >= qtd->urb->length) - qtd->error_count = 3; dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR); } diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 00aa9627400c..ce3e1f11cbc5 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -354,9 +354,6 @@ static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le) epnum |= 1; dep = dwc->eps[epnum]; - if (dep == NULL) - return NULL; - if (dep->flags & DWC3_EP_ENABLED) return dep; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 782fe570d63e..3cea65c86697 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -560,23 +560,8 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1); if (desc->bInterval) { - u8 bInterval_m1; - - /* - * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it - * must be set to 0 when the controller operates in full-speed. - */ - bInterval_m1 = min_t(u8, desc->bInterval - 1, 13); - if (dwc->gadget.speed == USB_SPEED_FULL) - bInterval_m1 = 0; - - if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT && - dwc->gadget.speed == USB_SPEED_FULL) - dep->interval = desc->bInterval; - else - dep->interval = 1 << (desc->bInterval - 1); - - params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1); + params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1); + dep->interval = 1 << (desc->bInterval - 1); } return dwc3_send_gadget_ep_cmd(dwc, dep->number, @@ -940,19 +925,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id); - /* - * As per data book 4.2.3.2TRB Control Bit Rules section - * - * The controller autonomously checks the HWO field of a TRB to determine if the - * entire TRB is valid. 
Therefore, software must ensure that the rest of the TRB - * is valid before setting the HWO field to '1'. In most systems, this means that - * software must update the fourth DWORD of a TRB last. - * - * However there is a possibility of CPU re-ordering here which can cause - * controller to observe the HWO bit set prematurely. - * Add a write memory barrier to prevent CPU re-ordering. - */ - wmb(); trb->ctrl |= DWC3_TRB_CTRL_HWO; trace_dwc3_prepare_trb(dep, trb); @@ -3479,7 +3451,6 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc) start_time = ktime_get(); - local_bh_disable(); spin_lock_irqsave(&dwc->lock, flags); dwc->bh_handled_evt_cnt[dwc->bh_dbg_index] = 0; @@ -3487,7 +3458,6 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc) ret |= dwc3_process_event_buf(dwc, i); spin_unlock_irqrestore(&dwc->lock, flags); - local_bh_enable(); temp_time = ktime_to_us(ktime_sub(ktime_get(), start_time)); dwc->bh_completion_time[dwc->bh_dbg_index] = temp_time; diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c index 44f1a496633c..ec004c6d76f2 100644 --- a/drivers/usb/dwc3/ulpi.c +++ b/drivers/usb/dwc3/ulpi.c @@ -10,8 +10,6 @@ * published by the Free Software Foundation. 
*/ -#include -#include #include #include "core.h" @@ -22,22 +20,12 @@ DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \ DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a)) -#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L) - -static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read) +static int dwc3_ulpi_busyloop(struct dwc3 *dwc) { - unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY; - unsigned int count = 1000; + unsigned count = 1000; u32 reg; - if (addr >= ULPI_EXT_VENDOR_SPECIFIC) - ns += DWC3_ULPI_BASE_DELAY; - - if (read) - ns += DWC3_ULPI_BASE_DELAY; - while (count--) { - ndelay(ns); reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0)); if (!(reg & DWC3_GUSB2PHYACC_BUSY)) return 0; @@ -56,7 +44,7 @@ static int dwc3_ulpi_read(struct ulpi_ops *ops, u8 addr) reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr); dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg); - ret = dwc3_ulpi_busyloop(dwc, addr, true); + ret = dwc3_ulpi_busyloop(dwc); if (ret) return ret; @@ -74,7 +62,7 @@ static int dwc3_ulpi_write(struct ulpi_ops *ops, u8 addr, u8 val) reg |= DWC3_GUSB2PHYACC_WRITE | val; dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg); - return dwc3_ulpi_busyloop(dwc, addr, false); + return dwc3_ulpi_busyloop(dwc); } static struct ulpi_ops dwc3_ulpi_ops = { diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 4fac4ac46bdd..2e51f79e7273 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -306,7 +306,6 @@ config USB_CONFIGFS_NCM depends on NET select USB_U_ETHER select USB_F_NCM - select CRC32 help NCM is an advanced protocol for Ethernet encapsulation, allows grouping of several ethernet frames into one USB transfer and @@ -370,7 +369,6 @@ config USB_CONFIGFS_EEM depends on NET select USB_U_ETHER select USB_F_EEM - select CRC32 help CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM and therefore can be supported by more hardware. 
Technically ECM and diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 68068cfb7278..f65b78f389a0 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -315,11 +315,8 @@ int usb_function_deactivate(struct usb_function *function) spin_lock_irqsave(&cdev->lock, flags); - if (cdev->deactivations == 0) { - spin_unlock_irqrestore(&cdev->lock, flags); + if (cdev->deactivations == 0) status = usb_gadget_deactivate(cdev->gadget); - spin_lock_irqsave(&cdev->lock, flags); - } if (status == 0) cdev->deactivations++; @@ -350,11 +347,8 @@ int usb_function_activate(struct usb_function *function) status = -EINVAL; else { cdev->deactivations--; - if (cdev->deactivations == 0) { - spin_unlock_irqrestore(&cdev->lock, flags); + if (cdev->deactivations == 0) status = usb_gadget_activate(cdev->gadget); - spin_lock_irqsave(&cdev->lock, flags); - } } spin_unlock_irqrestore(&cdev->lock, flags); @@ -1078,7 +1072,7 @@ static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf) while (*sp) { s = *sp; language = cpu_to_le16(s->language); - for (tmp = buf; *tmp && tmp < &buf[USB_MAX_STRING_LEN]; tmp++) { + for (tmp = buf; *tmp && tmp < &buf[126]; tmp++) { if (*tmp == language) goto repeat; } @@ -1153,7 +1147,7 @@ static int get_string(struct usb_composite_dev *cdev, collect_langs(sp, s->wData); } - for (len = 0; len <= USB_MAX_STRING_LEN && s->wData[len]; len++) + for (len = 0; len <= 126 && s->wData[len]; len++) continue; if (!len) return -EINVAL; @@ -1629,18 +1623,6 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) struct usb_function *f = NULL; u8 endp; - if (w_length > USB_COMP_EP0_BUFSIZ) { - if (ctrl->bRequestType & USB_DIR_IN) { - /* Cast away the const, we are going to overwrite on purpose. 
*/ - __le16 *temp = (__le16 *)&ctrl->wLength; - - *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ); - w_length = USB_COMP_EP0_BUFSIZ; - } else { - goto done; - } - } - /* partial re-init of the response message; the function or the * gadget might need to intercept e.g. a control-OUT completion * when we delegate to it. @@ -1941,9 +1923,6 @@ unknown: if (w_index != 0x5 || (w_value >> 8)) break; interface = w_value & 0xFF; - if (interface >= MAX_CONFIG_INTERFACES || - !os_desc_cfg->interface[interface]) - break; buf[6] = w_index; if (w_length == 0x0A) { count = count_ext_prop(os_desc_cfg, @@ -2217,7 +2196,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite, if (!cdev->req) return -ENOMEM; - cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL); + cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL); if (!cdev->req->buf) goto fail; diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 2233fd741208..b534c3dde167 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1725,7 +1725,7 @@ static const struct usb_gadget_driver configfs_driver_template = { .suspend = configfs_composite_suspend, .resume = configfs_composite_resume, - .max_speed = USB_SPEED_SUPER_PLUS, + .max_speed = USB_SPEED_SUPER, .driver = { .owner = THIS_MODULE, .name = "configfs-gadget", @@ -1848,7 +1848,7 @@ static struct config_group *gadgets_make( gi->composite.unbind = configfs_do_nothing; gi->composite.suspend = NULL; gi->composite.resume = NULL; - gi->composite.max_speed = USB_SPEED_SUPER_PLUS; + gi->composite.max_speed = USB_SPEED_SUPER; spin_lock_init(&gi->spinlock); mutex_init(&gi->lock); diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c index 6b139ac2f07e..61dfceb336d6 100644 --- a/drivers/usb/gadget/function/f_accessory.c +++ b/drivers/usb/gadget/function/f_accessory.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include @@ -74,7 +73,6 @@ struct acc_dev 
{ struct usb_function function; struct usb_composite_dev *cdev; spinlock_t lock; - struct acc_dev_ref *ref; struct usb_ep *ep_in; struct usb_ep *ep_out; @@ -82,13 +80,13 @@ struct acc_dev { /* online indicates state of function_set_alt & function_unbind * set to 1 when we connect */ - int online; + int online:1; /* disconnected indicates state of open & release * Set to 1 when we disconnect. * Not cleared until our file is closed. */ - int disconnected; + int disconnected:1; /* strings sent by the host */ char manufacturer[ACC_STRING_SIZE]; @@ -245,48 +243,14 @@ static struct usb_gadget_strings *acc_strings[] = { NULL, }; -struct acc_dev_ref { - struct kref kref; - struct acc_dev *acc_dev; -}; - -static struct acc_dev_ref _acc_dev_ref = { - .kref = KREF_INIT(0), -}; +/* temporary variable used between acc_open() and acc_gadget_bind() */ +static struct acc_dev *_acc_dev; struct acc_instance { struct usb_function_instance func_inst; const char *name; }; -static struct acc_dev *get_acc_dev(void) -{ - struct acc_dev_ref *ref = &_acc_dev_ref; - - return kref_get_unless_zero(&ref->kref) ? 
ref->acc_dev : NULL; -} - -static void __put_acc_dev(struct kref *kref) -{ - struct acc_dev_ref *ref = container_of(kref, struct acc_dev_ref, kref); - struct acc_dev *dev = ref->acc_dev; - - /* Cancel any async work */ - cancel_delayed_work_sync(&dev->start_work); - cancel_work_sync(&dev->hid_work); - - ref->acc_dev = NULL; - kfree(dev); -} - -static void put_acc_dev(struct acc_dev *dev) -{ - struct acc_dev_ref *ref = dev->ref; - - WARN_ON(ref->acc_dev != dev); - kref_put(&ref->kref, __put_acc_dev); -} - static inline struct acc_dev *func_to_dev(struct usb_function *f) { return container_of(f, struct acc_dev, function); @@ -352,10 +316,7 @@ static void acc_set_disconnected(struct acc_dev *dev) static void acc_complete_in(struct usb_ep *ep, struct usb_request *req) { - struct acc_dev *dev = get_acc_dev(); - - if (!dev) - return; + struct acc_dev *dev = _acc_dev; if (req->status == -ESHUTDOWN) { pr_debug("acc_complete_in set disconnected"); @@ -365,15 +326,11 @@ static void acc_complete_in(struct usb_ep *ep, struct usb_request *req) req_put(dev, &dev->tx_idle, req); wake_up(&dev->write_wq); - put_acc_dev(dev); } static void acc_complete_out(struct usb_ep *ep, struct usb_request *req) { - struct acc_dev *dev = get_acc_dev(); - - if (!dev) - return; + struct acc_dev *dev = _acc_dev; dev->rx_done = 1; if (req->status == -ESHUTDOWN) { @@ -382,7 +339,6 @@ static void acc_complete_out(struct usb_ep *ep, struct usb_request *req) } wake_up(&dev->read_wq); - put_acc_dev(dev); } static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req) @@ -847,36 +803,24 @@ static long acc_ioctl(struct file *fp, unsigned code, unsigned long value) static int acc_open(struct inode *ip, struct file *fp) { - struct acc_dev *dev = get_acc_dev(); - - if (!dev) - return -ENODEV; - - if (atomic_xchg(&dev->open_excl, 1)) { - put_acc_dev(dev); + printk(KERN_INFO "acc_open\n"); + if (atomic_xchg(&_acc_dev->open_excl, 1)) return -EBUSY; - } - dev->disconnected = 0; - 
fp->private_data = dev; + _acc_dev->disconnected = 0; + fp->private_data = _acc_dev; return 0; } static int acc_release(struct inode *ip, struct file *fp) { - struct acc_dev *dev = fp->private_data; - - if (!dev) - return -ENOENT; + printk(KERN_INFO "acc_release\n"); + WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0)); /* indicate that we are disconnected * still could be online so don't touch online flag */ - dev->disconnected = 1; - - fp->private_data = NULL; - WARN_ON(!atomic_xchg(&dev->open_excl, 0)); - put_acc_dev(dev); + _acc_dev->disconnected = 1; return 0; } @@ -932,7 +876,7 @@ static void acc_complete_setup_noop(struct usb_ep *ep, struct usb_request *req) int acc_ctrlrequest(struct usb_composite_dev *cdev, const struct usb_ctrlrequest *ctrl) { - struct acc_dev *dev = get_acc_dev(); + struct acc_dev *dev = _acc_dev; int value = -EOPNOTSUPP; struct acc_hid_dev *hid; int offset; @@ -949,6 +893,12 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev, */ if (!dev) return -ENODEV; +/* + * printk(KERN_INFO "acc_ctrlrequest " + * "%02x.%02x v%04x i%04x l%u\n", + * b_requestType, b_request, + * w_value, w_index, w_length); + */ if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) { if (b_request == ACCESSORY_START) { @@ -1037,7 +987,6 @@ err: "%02x.%02x v%04x i%04x l%u\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); - put_acc_dev(dev); return value; } EXPORT_SYMBOL_GPL(acc_ctrlrequest); @@ -1116,6 +1065,10 @@ kill_all_hid_devices(struct acc_dev *dev) struct list_head *entry, *temp; unsigned long flags; + /* do nothing if usb accessory device doesn't exist */ + if (!dev) + return; + spin_lock_irqsave(&dev->lock, flags); list_for_each_safe(entry, temp, &dev->hid_list) { hid = list_entry(entry, struct acc_hid_dev, list); @@ -1200,15 +1153,12 @@ static void acc_hid_delete(struct acc_hid_dev *hid) static void acc_hid_work(struct work_struct *data) { - struct acc_dev *dev = get_acc_dev(); + struct acc_dev *dev = _acc_dev; struct list_head *entry, 
*temp; struct acc_hid_dev *hid; struct list_head new_list, dead_list; unsigned long flags; - if (!dev) - return; - INIT_LIST_HEAD(&new_list); spin_lock_irqsave(&dev->lock, flags); @@ -1254,8 +1204,6 @@ static void acc_hid_work(struct work_struct *data) hid_destroy_device(hid->hid); acc_hid_delete(hid); } - - put_acc_dev(dev); } static int acc_function_set_alt(struct usb_function *f, @@ -1312,13 +1260,9 @@ static void acc_function_disable(struct usb_function *f) static int acc_setup(void) { - struct acc_dev_ref *ref = &_acc_dev_ref; struct acc_dev *dev; int ret; - if (kref_read(&ref->kref)) - return -EBUSY; - dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; @@ -1334,22 +1278,16 @@ static int acc_setup(void) INIT_DELAYED_WORK(&dev->start_work, acc_start_work); INIT_WORK(&dev->hid_work, acc_hid_work); - dev->ref = ref; - if (cmpxchg_relaxed(&ref->acc_dev, NULL, dev)) { - ret = -EBUSY; - goto err_free_dev; - } - ret = misc_register(&acc_device); if (ret) - goto err_zap_ptr; + goto err; + + /* _acc_dev must be set before calling usb_gadget_register_driver */ + _acc_dev = dev; - kref_init(&ref->kref); return 0; -err_zap_ptr: - ref->acc_dev = NULL; -err_free_dev: +err: kfree(dev); pr_err("USB accessory gadget driver failed to initialize\n"); return ret; @@ -1357,24 +1295,16 @@ err_free_dev: void acc_disconnect(void) { - struct acc_dev *dev = get_acc_dev(); - - if (!dev) - return; - /* unregister all HID devices if USB is disconnected */ - kill_all_hid_devices(dev); - put_acc_dev(dev); + kill_all_hid_devices(_acc_dev); } EXPORT_SYMBOL_GPL(acc_disconnect); static void acc_cleanup(void) { - struct acc_dev *dev = get_acc_dev(); - misc_deregister(&acc_device); - put_acc_dev(dev); - put_acc_dev(dev); /* Pairs with kref_init() in acc_setup() */ + kfree(_acc_dev); + _acc_dev = NULL; } static struct acc_instance *to_acc_instance(struct config_item *item) { @@ -1434,6 +1364,7 @@ static void acc_free_inst(struct usb_function_instance *fi) static struct 
usb_function_instance *acc_alloc_inst(void) { struct acc_instance *fi_acc; + struct acc_dev *dev; int err; fi_acc = kzalloc(sizeof(*fi_acc), GFP_KERNEL); @@ -1445,19 +1376,19 @@ static struct usb_function_instance *acc_alloc_inst(void) err = acc_setup(); if (err) { kfree(fi_acc); + pr_err("Error setting ACCESSORY\n"); return ERR_PTR(err); } config_group_init_type_name(&fi_acc->func_inst.group, "", &acc_func_type); + dev = _acc_dev; return &fi_acc->func_inst; } static void acc_free(struct usb_function *f) { - struct acc_dev *dev = func_to_dev(f); - - put_acc_dev(dev); +/*NO-OP: no function specific resource allocation in mtp_alloc*/ } int acc_ctrlrequest_configfs(struct usb_function *f, @@ -1470,7 +1401,9 @@ int acc_ctrlrequest_configfs(struct usb_function *f, static struct usb_function *acc_alloc(struct usb_function_instance *fi) { - struct acc_dev *dev = get_acc_dev(); + struct acc_dev *dev = _acc_dev; + + pr_info("acc_alloc\n"); dev->function.name = "accessory"; dev->function.strings = acc_strings, diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c index 536e3f72e355..cad35a502d3f 100644 --- a/drivers/usb/gadget/function/f_eem.c +++ b/drivers/usb/gadget/function/f_eem.c @@ -34,11 +34,6 @@ struct f_eem { u8 ctrl_id; }; -struct in_context { - struct sk_buff *skb; - struct usb_ep *ep; -}; - static inline struct f_eem *func_to_eem(struct usb_function *f) { return container_of(f, struct f_eem, port.func); @@ -332,12 +327,9 @@ fail: static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req) { - struct in_context *ctx = req->context; + struct sk_buff *skb = (struct sk_buff *)req->context; - dev_kfree_skb_any(ctx->skb); - kfree(req->buf); - usb_ep_free_request(ctx->ep, req); - kfree(ctx); + dev_kfree_skb_any(skb); } /* @@ -421,9 +413,7 @@ static int eem_unwrap(struct gether *port, * b15: bmType (0 == data, 1 == command) */ if (header & BIT(15)) { - struct usb_request *req; - struct in_context *ctx; - struct usb_ep *ep; + 
struct usb_request *req = cdev->req; u16 bmEEMCmd; /* EEM command packet format: @@ -452,36 +442,11 @@ static int eem_unwrap(struct gether *port, skb_trim(skb2, len); put_unaligned_le16(BIT(15) | BIT(11) | len, skb_push(skb2, 2)); - - ep = port->in_ep; - req = usb_ep_alloc_request(ep, GFP_ATOMIC); - if (!req) { - dev_kfree_skb_any(skb2); - goto next; - } - - req->buf = kmalloc(skb2->len, GFP_KERNEL); - if (!req->buf) { - usb_ep_free_request(ep, req); - dev_kfree_skb_any(skb2); - goto next; - } - - ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) { - kfree(req->buf); - usb_ep_free_request(ep, req); - dev_kfree_skb_any(skb2); - goto next; - } - ctx->skb = skb2; - ctx->ep = ep; - skb_copy_bits(skb2, 0, req->buf, skb2->len); req->length = skb2->len; req->complete = eem_cmd_complete; req->zero = 1; - req->context = ctx; + req->context = skb2; if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC)) DBG(cdev, "echo response queue fail\n"); break; @@ -533,7 +498,7 @@ static int eem_unwrap(struct gether *port, skb2 = skb_clone(skb, GFP_ATOMIC); if (unlikely(!skb2)) { DBG(cdev, "unable to unframe EEM packet\n"); - goto next; + continue; } skb_trim(skb2, len - ETH_FCS_LEN); @@ -544,7 +509,7 @@ static int eem_unwrap(struct gether *port, if (unlikely(!skb3)) { DBG(cdev, "unable to realign EEM packet\n"); dev_kfree_skb_any(skb2); - goto next; + continue; } dev_kfree_skb_any(skb2); skb_queue_tail(list, skb3); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 3b27b2a69cad..83c9d5c1de77 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -624,7 +624,7 @@ static int ffs_ep0_open(struct inode *inode, struct file *file) file->private_data = ffs; ffs_data_opened(ffs); - return stream_open(inode, file); + return 0; } static int ffs_ep0_release(struct inode *inode, struct file *file) @@ -1130,7 +1130,7 @@ ffs_epfile_open(struct inode *inode, struct file *file) ffs_log("exit:state %d setup_state %d flag %lu", 
epfile->ffs->state, epfile->ffs->setup_state, epfile->ffs->flags); - return stream_open(inode, file); + return 0; } static int ffs_aio_cancel(struct kiocb *kiocb) @@ -1313,7 +1313,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code, case FUNCTIONFS_ENDPOINT_DESC: { int desc_idx; - struct usb_endpoint_descriptor desc1, *desc; + struct usb_endpoint_descriptor *desc; switch (epfile->ffs->gadget->speed) { case USB_SPEED_SUPER: @@ -1325,12 +1325,10 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code, default: desc_idx = 0; } - desc = epfile->ep->descs[desc_idx]; - memcpy(&desc1, desc, desc->bLength); spin_unlock_irq(&epfile->ffs->eps_lock); - ret = copy_to_user((void *)value, &desc1, desc1.bLength); + ret = copy_to_user((void *)value, desc, sizeof(*desc)); if (ret) ret = -EFAULT; return ret; @@ -1847,15 +1845,11 @@ static void ffs_data_clear(struct ffs_data *ffs) __func__, ffs, ffs->gadget, ffs->flags); BUG_ON(ffs->gadget); - if (ffs->epfiles) { + if (ffs->epfiles) ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count); - ffs->epfiles = NULL; - } - if (ffs->ffs_eventfd) { + if (ffs->ffs_eventfd) eventfd_ctx_put(ffs->ffs_eventfd); - ffs->ffs_eventfd = NULL; - } kfree(ffs->raw_descs_data); kfree(ffs->raw_strings); @@ -1874,6 +1868,7 @@ static void ffs_data_reset(struct ffs_data *ffs) ffs_data_clear(ffs); + ffs->epfiles = NULL; ffs->raw_descs_data = NULL; ffs->raw_descs = NULL; ffs->raw_strings = NULL; @@ -2809,7 +2804,6 @@ static int __ffs_data_got_strings(struct ffs_data *ffs, do { /* lang_count > 0 so we can use do-while */ unsigned needed = needed_count; - u32 str_per_lang = str_count; if (unlikely(len < 3)) goto error_free; @@ -2845,7 +2839,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs, data += length + 1; len -= length + 1; - } while (--str_per_lang); + } while (--str_count); s->id = 0; /* terminator */ s->s = NULL; diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 
e01d20939449..fef3c11d84c3 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -92,7 +92,7 @@ static struct usb_interface_descriptor hidg_interface_desc = { static struct hid_descriptor hidg_desc = { .bLength = sizeof hidg_desc, .bDescriptorType = HID_DT_HID, - .bcdHID = cpu_to_le16(0x0101), + .bcdHID = 0x0101, .bCountryCode = 0x00, .bNumDescriptors = 0x1, /*.desc[0].bDescriptorType = DYNAMIC */ diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index a1332f77f173..a32a6a74f83d 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -585,7 +585,7 @@ static void ncm_do_notify(struct f_ncm *ncm) data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget)); data[1] = data[0]; - DBG(cdev, "notify speed %u\n", ncm_bitrate(cdev->gadget)); + DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget)); ncm->notify_state = NCM_NOTIFY_CONNECT; break; } diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 2f509f8bcd4b..99a35c1a1cfb 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -1116,7 +1116,6 @@ fail_tx_reqs: printer_req_free(dev->in_ep, req); } - usb_free_all_descriptors(f); return ret; } diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index 9e1b838ce86f..63f4be80b651 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c @@ -101,10 +101,8 @@ static inline struct f_rndis *func_to_rndis(struct usb_function *f) /* peak (theoretical) bulk transfer rate in bits-per-second */ static unsigned int bitrate(struct usb_gadget *g) { - if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS) - return 4250000000U; if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) - return 3750000000U; + return 13 * 1024 * 8 * 1000 * 8; else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) 
return 13 * 512 * 8 * 1000 * 8; else diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index 151643c378ad..77681c43318d 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c @@ -592,7 +592,6 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, for (i = 0; i < 8; i++) { if (is_iso) { switch (speed) { - case USB_SPEED_SUPER_PLUS: case USB_SPEED_SUPER: size = ss->isoc_maxpacket * (ss->isoc_mult + 1) * diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 59de3f246f42..9b91b5c0584e 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -278,7 +278,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = { .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, - /* .wMaxPacketSize = DYNAMIC */ + .wMaxPacketSize = cpu_to_le16(1023), .bInterval = 1, }; @@ -287,7 +287,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = { .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, - /* .wMaxPacketSize = DYNAMIC */ + .wMaxPacketSize = cpu_to_le16(1024), .bInterval = 4, }; @@ -355,7 +355,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = { .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, - /* .wMaxPacketSize = DYNAMIC */ + .wMaxPacketSize = cpu_to_le16(1023), .bInterval = 1, }; @@ -364,7 +364,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = { .bDescriptorType = USB_DT_ENDPOINT, .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, - /* .wMaxPacketSize = DYNAMIC */ + .wMaxPacketSize = cpu_to_le16(1024), .bInterval = 4, }; @@ -451,28 +451,12 @@ struct cntrl_range_lay3 { __le32 dRES; } __packed; -static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, +static void set_ep_max_packet_size(const 
struct f_uac2_opts *uac2_opts, struct usb_endpoint_descriptor *ep_desc, - enum usb_device_speed speed, bool is_playback) + unsigned int factor, bool is_playback) { int chmask, srate, ssize; - u16 max_size_bw, max_size_ep; - unsigned int factor; - - switch (speed) { - case USB_SPEED_FULL: - max_size_ep = 1023; - factor = 1000; - break; - - case USB_SPEED_HIGH: - max_size_ep = 1024; - factor = 8000; - break; - - default: - return -EINVAL; - } + u16 max_packet_size; if (is_playback) { chmask = uac2_opts->p_chmask; @@ -484,12 +468,10 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, ssize = uac2_opts->c_ssize; } - max_size_bw = num_channels(chmask) * ssize * - ((srate / (factor / (1 << (ep_desc->bInterval - 1)))) + 1); - ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw, - max_size_ep)); - - return 0; + max_packet_size = num_channels(chmask) * ssize * + DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1))); + ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size, + le16_to_cpu(ep_desc->wMaxPacketSize))); } static int @@ -572,33 +554,10 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) uac2->as_in_alt = 0; /* Calculate wMaxPacketSize according to audio bandwidth */ - ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL, - true); - if (ret < 0) { - dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); - return ret; - } - - ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL, - false); - if (ret < 0) { - dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); - return ret; - } - - ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH, - true); - if (ret < 0) { - dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); - return ret; - } - - ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH, - false); - if (ret < 0) { - dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); - return ret; - } + set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 
1000, true); + set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false); + set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true); + set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false); agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc); if (!agdev->out_ep) { diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index 89da34ef7b3f..f8a1881609a2 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -625,12 +625,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f) uvc_hs_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11)); - - /* A high-bandwidth endpoint must specify a bInterval value of 1 */ - if (max_packet_mult > 1) - uvc_hs_streaming_ep.bInterval = 1; - else - uvc_hs_streaming_ep.bInterval = opts->streaming_interval; + uvc_hs_streaming_ep.bInterval = opts->streaming_interval; uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size); uvc_ss_streaming_ep.bInterval = opts->streaming_interval; diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index c5c8a7cc8f1d..1d13d79d5070 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -650,17 +650,14 @@ static int rndis_set_response(struct rndis_params *params, rndis_set_cmplt_type *resp; rndis_resp_t *r; - BufLength = le32_to_cpu(buf->InformationBufferLength); - BufOffset = le32_to_cpu(buf->InformationBufferOffset); - if ((BufLength > RNDIS_MAX_TOTAL_SIZE) || - (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE)) - return -EINVAL; - r = rndis_add_response(params, sizeof(rndis_set_cmplt_type)); if (!r) return -ENOMEM; resp = (rndis_set_cmplt_type *)r->buf; + BufLength = le32_to_cpu(buf->InformationBufferLength); + BufOffset = le32_to_cpu(buf->InformationBufferOffset); + #ifdef VERBOSE_DEBUG pr_debug("%s: Length: %d\n", __func__, BufLength); pr_debug("%s: Offset: %d\n", __func__, 
BufOffset); diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index f9ab75c4a311..34a337888788 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -621,9 +621,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, } spin_unlock_irqrestore(&dev->lock, flags); - if (!in) { - if (skb) - dev_kfree_skb_any(skb); + if (skb && !in) { + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c index 4fe499cf5b77..64b2cbb0bc6b 100644 --- a/drivers/usb/gadget/legacy/acm_ms.c +++ b/drivers/usb/gadget/legacy/acm_ms.c @@ -207,10 +207,8 @@ static int acm_ms_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) { - status = -ENOMEM; + if (!usb_desc) goto fail_string_ids; - } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c index e8818ad973e4..99ca3dabc4f3 100644 --- a/drivers/usb/gadget/legacy/dbgp.c +++ b/drivers/usb/gadget/legacy/dbgp.c @@ -136,7 +136,7 @@ static int dbgp_enable_ep_req(struct usb_ep *ep) goto fail_1; } - req->buf = kzalloc(DBGP_REQ_LEN, GFP_KERNEL); + req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL); if (!req->buf) { err = -ENOMEM; stp = 2; @@ -344,19 +344,6 @@ static int dbgp_setup(struct usb_gadget *gadget, void *data = NULL; u16 len = 0; - if (length > DBGP_REQ_LEN) { - if (ctrl->bRequestType & USB_DIR_IN) { - /* Cast away the const, we are going to overwrite on purpose. 
*/ - __le16 *temp = (__le16 *)&ctrl->wLength; - - *temp = cpu_to_le16(DBGP_REQ_LEN); - length = DBGP_REQ_LEN; - } else { - return err; - } - } - - if (request == USB_REQ_GET_DESCRIPTOR) { switch (value>>8) { case USB_DT_DEVICE: diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c index 0b7229678b53..31e9160223e9 100644 --- a/drivers/usb/gadget/legacy/ether.c +++ b/drivers/usb/gadget/legacy/ether.c @@ -407,10 +407,8 @@ static int eth_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) { - status = -ENOMEM; + if (!usb_desc) goto fail1; - } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c index 5cb3359cf126..7e5d2c48476e 100644 --- a/drivers/usb/gadget/legacy/hid.c +++ b/drivers/usb/gadget/legacy/hid.c @@ -103,10 +103,8 @@ static int do_config(struct usb_configuration *c) list_for_each_entry(e, &hidg_func_list, node) { e->f = usb_get_function(e->fi); - if (IS_ERR(e->f)) { - status = PTR_ERR(e->f); + if (IS_ERR(e->f)) goto put; - } status = usb_add_function(c, e->f); if (status < 0) { usb_put_function(e->f); @@ -177,10 +175,8 @@ static int hid_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) { - status = -ENOMEM; + if (!usb_desc) goto put; - } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index a069150d97f3..b95900168a6b 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -113,8 +113,6 @@ enum ep0_state { /* enough for the whole queue: most events invalidate others */ #define N_EVENT 5 -#define RBUF_SIZE 256 - struct dev_data { spinlock_t lock; atomic_t count; @@ -148,7 +146,7 @@ struct 
dev_data { struct dentry *dentry; /* except this scratch i/o buffer for ep0 */ - u8 rbuf[RBUF_SIZE]; + u8 rbuf [256]; }; static inline void get_dev (struct dev_data *data) @@ -1334,18 +1332,6 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); - if (w_length > RBUF_SIZE) { - if (ctrl->bRequestType & USB_DIR_IN) { - /* Cast away the const, we are going to overwrite on purpose. */ - __le16 *temp = (__le16 *)&ctrl->wLength; - - *temp = cpu_to_le16(RBUF_SIZE); - w_length = RBUF_SIZE; - } else { - return value; - } - } - spin_lock (&dev->lock); dev->setup_abort = 0; if (dev->state == STATE_DEV_UNCONNECTED) { @@ -1851,9 +1837,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) spin_lock_irq (&dev->lock); value = -EINVAL; if (dev->buf) { - spin_unlock_irq(&dev->lock); kfree(kbuf); - return value; + goto fail; } dev->buf = kbuf; @@ -1901,8 +1886,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) value = usb_gadget_probe_driver(&gadgetfs_driver); if (value != 0) { - spin_lock_irq(&dev->lock); - goto fail; + kfree (dev->buf); + dev->buf = NULL; } else { /* at this point "good" hardware has for the first time * let the USB the host see us. 
alternatively, if users @@ -1918,9 +1903,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) return value; fail: - dev->config = NULL; - dev->hs_config = NULL; - dev->dev = NULL; spin_unlock_irq (&dev->lock); pr_debug ("%s: %s fail %Zd, %p\n", shortname, __func__, value, dev); kfree (dev->buf); diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index ee7afba5ca70..cdbff54e07ac 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -265,7 +265,6 @@ config USB_AMD5536UDC config USB_FSL_QE tristate "Freescale QE/CPM USB Device Controller" depends on FSL_SOC && (QUICC_ENGINE || CPM) - depends on !64BIT || BROKEN help Some of Freescale PowerPC processors have a Full Speed QE/CPM2 USB controller, which support device mode with 4 diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c index 2da281a743b8..d0d18947f58b 100644 --- a/drivers/usb/gadget/udc/at91_udc.c +++ b/drivers/usb/gadget/udc/at91_udc.c @@ -1898,9 +1898,7 @@ static int at91udc_probe(struct platform_device *pdev) clk_disable(udc->iclk); /* request UDC and maybe VBUS irqs */ - udc->udp_irq = retval = platform_get_irq(pdev, 0); - if (retval < 0) - goto err_unprepare_iclk; + udc->udp_irq = platform_get_irq(pdev, 0); retval = devm_request_irq(dev, udc->udp_irq, at91_udc_irq, 0, driver_name, udc); if (retval) { diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig index 778df4badf88..0d7b8c9f72fd 100644 --- a/drivers/usb/gadget/udc/bdc/Kconfig +++ b/drivers/usb/gadget/udc/bdc/Kconfig @@ -14,7 +14,7 @@ if USB_BDC_UDC comment "Platform Support" config USB_BDC_PCI tristate "BDC support for PCIe based platforms" - depends on PCI && BROKEN + depends on PCI default USB_BDC_UDC help Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform. 
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index c468c64bc3a0..0321b9ce9faf 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -920,21 +920,6 @@ static int dummy_pullup(struct usb_gadget *_gadget, int value) spin_lock_irqsave(&dum->lock, flags); dum->pullup = (value != 0); set_link_state(dum_hcd); - if (value == 0) { - /* - * Emulate synchronize_irq(): wait for callbacks to finish. - * This seems to be the best place to emulate the call to - * synchronize_irq() that's in usb_gadget_remove_driver(). - * Doing it in dummy_udc_stop() would be too late since it - * is called after the unbind callback and unbind shouldn't - * be invoked until all the other callbacks are finished. - */ - while (dum->callback_usage > 0) { - spin_unlock_irqrestore(&dum->lock, flags); - usleep_range(1000, 2000); - spin_lock_irqsave(&dum->lock, flags); - } - } spin_unlock_irqrestore(&dum->lock, flags); usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd)); @@ -1015,6 +1000,14 @@ static int dummy_udc_stop(struct usb_gadget *g) spin_lock_irq(&dum->lock); dum->ints_enabled = 0; stop_activity(dum); + + /* emulate synchronize_irq(): wait for callbacks to finish */ + while (dum->callback_usage > 0) { + spin_unlock_irq(&dum->lock); + usleep_range(1000, 2000); + spin_lock_irq(&dum->lock); + } + dum->driver = NULL; spin_unlock_irq(&dum->lock); @@ -2748,7 +2741,7 @@ static int __init init(void) { int retval = -ENOMEM; int i; - struct dummy *dum[MAX_NUM_UDC] = {}; + struct dummy *dum[MAX_NUM_UDC]; if (usb_disabled()) return -ENODEV; diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c index 9e102ba9cf66..76e991557116 100644 --- a/drivers/usb/gadget/udc/fotg210-udc.c +++ b/drivers/usb/gadget/udc/fotg210-udc.c @@ -340,16 +340,15 @@ static void fotg210_start_dma(struct fotg210_ep *ep, } else { buffer = req->req.buf + req->req.actual; length = ioread32(ep->fotg210->reg + - 
FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX; - if (length > req->req.length - req->req.actual) - length = req->req.length - req->req.actual; + FOTG210_FIBCR(ep->epnum - 1)); + length &= FIBCR_BCFX; } } else { buffer = req->req.buf + req->req.actual; if (req->req.length - req->req.actual > ep->ep.maxpacket) length = ep->ep.maxpacket; else - length = req->req.length - req->req.actual; + length = req->req.length; } d = dma_map_single(NULL, buffer, length, @@ -386,7 +385,8 @@ static void fotg210_ep0_queue(struct fotg210_ep *ep, } if (ep->dir_in) { /* if IN */ fotg210_start_dma(ep, req); - if (req->req.length == req->req.actual) + if ((req->req.length == req->req.actual) || + (req->req.actual < ep->ep.maxpacket)) fotg210_done(ep, req, 0); } else { /* OUT */ u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0); @@ -827,7 +827,7 @@ static void fotg210_ep0in(struct fotg210_udc *fotg210) if (req->req.length) fotg210_start_dma(ep, req); - if (req->req.actual == req->req.length) + if ((req->req.length - req->req.actual) < ep->ep.maxpacket) fotg210_done(ep, req, 0); } else { fotg210_set_cxdone(fotg210); @@ -856,16 +856,12 @@ static void fotg210_out_fifo_handler(struct fotg210_ep *ep) { struct fotg210_request *req = list_entry(ep->queue.next, struct fotg210_request, queue); - int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1); fotg210_start_dma(ep, req); - /* Complete the request when it's full or a short packet arrived. - * Like other drivers, short_not_ok isn't handled. 
- */ - + /* finish out transfer */ if (req->req.length == req->req.actual || - (disgr1 & DISGR1_SPK_INT(ep->epnum - 1))) + req->req.actual < ep->ep.maxpacket) fotg210_done(ep, req, 0); } @@ -1038,12 +1034,6 @@ static void fotg210_init(struct fotg210_udc *fotg210) value &= ~DMCR_GLINT_EN; iowrite32(value, fotg210->reg + FOTG210_DMCR); - /* enable only grp2 irqs we handle */ - iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT - | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT - | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT), - fotg210->reg + FOTG210_DMISGR2); - /* disable all fifo interrupt */ iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1); diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c index 9ee4a2605dea..dafe74eb9ade 100644 --- a/drivers/usb/gadget/udc/mv_u3d_core.c +++ b/drivers/usb/gadget/udc/mv_u3d_core.c @@ -1929,6 +1929,14 @@ static int mv_u3d_probe(struct platform_device *dev) goto err_get_irq; } u3d->irq = r->start; + if (request_irq(u3d->irq, mv_u3d_irq, + IRQF_SHARED, driver_name, u3d)) { + u3d->irq = 0; + dev_err(&dev->dev, "Request irq %d for u3d failed\n", + u3d->irq); + retval = -ENODEV; + goto err_request_irq; + } /* initialize gadget structure */ u3d->gadget.ops = &mv_u3d_ops; /* usb_gadget_ops */ @@ -1941,15 +1949,6 @@ static int mv_u3d_probe(struct platform_device *dev) mv_u3d_eps_init(u3d); - if (request_irq(u3d->irq, mv_u3d_irq, - IRQF_SHARED, driver_name, u3d)) { - u3d->irq = 0; - dev_err(&dev->dev, "Request irq %d for u3d failed\n", - u3d->irq); - retval = -ENODEV; - goto err_request_irq; - } - /* external vbus detection */ if (u3d->vbus) { u3d->clock_gating = 1; @@ -1973,8 +1972,8 @@ static int mv_u3d_probe(struct platform_device *dev) err_unregister: free_irq(u3d->irq, u3d); -err_get_irq: err_request_irq: +err_get_irq: kfree(u3d->status_req); err_alloc_status_req: kfree(u3d->eps); diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c 
index 5301de1c5d31..3fd603494e86 100644 --- a/drivers/usb/gadget/udc/pch_udc.c +++ b/drivers/usb/gadget/udc/pch_udc.c @@ -615,22 +615,18 @@ static void pch_udc_reconnect(struct pch_udc_dev *dev) static inline void pch_udc_vbus_session(struct pch_udc_dev *dev, int is_active) { - unsigned long iflags; - - spin_lock_irqsave(&dev->lock, iflags); if (is_active) { pch_udc_reconnect(dev); dev->vbus_session = 1; } else { if (dev->driver && dev->driver->disconnect) { - spin_unlock_irqrestore(&dev->lock, iflags); + spin_lock(&dev->lock); dev->driver->disconnect(&dev->gadget); - spin_lock_irqsave(&dev->lock, iflags); + spin_unlock(&dev->lock); } pch_udc_set_disconnect(dev); dev->vbus_session = 0; } - spin_unlock_irqrestore(&dev->lock, iflags); } /** @@ -1187,25 +1183,20 @@ static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value) static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on) { struct pch_udc_dev *dev; - unsigned long iflags; if (!gadget) return -EINVAL; - dev = container_of(gadget, struct pch_udc_dev, gadget); - - spin_lock_irqsave(&dev->lock, iflags); if (is_on) { pch_udc_reconnect(dev); } else { if (dev->driver && dev->driver->disconnect) { - spin_unlock_irqrestore(&dev->lock, iflags); + spin_lock(&dev->lock); dev->driver->disconnect(&dev->gadget); - spin_lock_irqsave(&dev->lock, iflags); + spin_unlock(&dev->lock); } pch_udc_set_disconnect(dev); } - spin_unlock_irqrestore(&dev->lock, iflags); return 0; } @@ -1797,7 +1788,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep, } /* prevent from using desc. 
- set HOST BUSY */ dma_desc->status |= PCH_UDC_BS_HST_BSY; - dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID); + dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID); req->td_data = dma_desc; req->td_data_last = dma_desc; req->chain_len = 1; @@ -2341,21 +2332,6 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num) pch_udc_set_dma(dev, DMA_DIR_RX); } -static int pch_udc_gadget_setup(struct pch_udc_dev *dev) - __must_hold(&dev->lock) -{ - int rc; - - /* In some cases we can get an interrupt before driver gets setup */ - if (!dev->driver) - return -ESHUTDOWN; - - spin_unlock(&dev->lock); - rc = dev->driver->setup(&dev->gadget, &dev->setup_data); - spin_lock(&dev->lock); - return rc; -} - /** * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts * @dev: Reference to the device structure @@ -2427,12 +2403,15 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev) dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep; else /* OUT */ dev->gadget.ep0 = &ep->ep; + spin_lock(&dev->lock); /* If Mass storage Reset */ if ((dev->setup_data.bRequestType == 0x21) && (dev->setup_data.bRequest == 0xFF)) dev->prot_stall = 0; /* call gadget with setup data received */ - setup_supported = pch_udc_gadget_setup(dev); + setup_supported = dev->driver->setup(&dev->gadget, + &dev->setup_data); + spin_unlock(&dev->lock); if (dev->setup_data.bRequestType & USB_DIR_IN) { ep->td_data->status = (ep->td_data->status & @@ -2685,7 +2664,9 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev) dev->ep[i].halted = 0; } dev->stall = 0; - pch_udc_gadget_setup(dev); + spin_unlock(&dev->lock); + dev->driver->setup(&dev->gadget, &dev->setup_data); + spin_lock(&dev->lock); } /** @@ -2720,7 +2701,9 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev) dev->stall = 0; /* call gadget zero with setup data received */ - pch_udc_gadget_setup(dev); + spin_unlock(&dev->lock); + dev->driver->setup(&dev->gadget, &dev->setup_data); + spin_lock(&dev->lock); } /** 
@@ -3003,7 +2986,7 @@ static int init_dma_pools(struct pch_udc_dev *dev) dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf, UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE); - return dma_mapping_error(&dev->pdev->dev, dev->dma_addr); + return 0; } static int pch_udc_start(struct usb_gadget *g, diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c index ffe6cb4c78ff..e34094647603 100644 --- a/drivers/usb/gadget/udc/r8a66597-udc.c +++ b/drivers/usb/gadget/udc/r8a66597-udc.c @@ -1253,7 +1253,7 @@ static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl) do { tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ; udelay(1); - } while (tmp != CS_IDST && timeout-- > 0); + } while (tmp != CS_IDST || timeout-- > 0); if (tmp == CS_IDST) r8a66597_bset(r8a66597, @@ -1867,8 +1867,6 @@ static int r8a66597_probe(struct platform_device *pdev) return PTR_ERR(reg); ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!ires) - return -EINVAL; irq = ires->start; irq_trigger = ires->flags & IRQF_TRIGGER_MASK; diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index 3454e263fd82..21a7915d6a4a 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c @@ -618,13 +618,10 @@ static ssize_t usb_udc_softconn_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { struct usb_udc *udc = container_of(dev, struct usb_udc, dev); - ssize_t ret; - mutex_lock(&udc_lock); if (!udc->driver) { dev_err(dev, "soft-connect without a gadget driver\n"); - ret = -EOPNOTSUPP; - goto out; + return -EOPNOTSUPP; } if (sysfs_streq(buf, "connect")) { @@ -636,14 +633,10 @@ static ssize_t usb_udc_softconn_store(struct device *dev, usb_gadget_udc_stop(udc); } else { dev_err(dev, "unsupported command '%s'\n", buf); - ret = -EINVAL; - goto out; + return -EINVAL; } - ret = n; -out: - mutex_unlock(&udc_lock); - return ret; + return n; } static 
DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store); diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index b3168725b8a9..1cbb0ac6b182 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -1622,8 +1622,6 @@ static void xudc_getstatus(struct xusb_udc *udc) break; case USB_RECIP_ENDPOINT: epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; - if (epnum >= XUSB_MAX_ENDPOINTS) - goto stall; target_ep = &udc->ep[epnum]; epcfgreg = udc->read_fn(udc->addr + target_ep->offset); halt = epcfgreg & XUSB_EP_CFG_STALL_MASK; @@ -1691,10 +1689,6 @@ static void xudc_set_clear_feature(struct xusb_udc *udc) case USB_RECIP_ENDPOINT: if (!udc->setup.wValue) { endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; - if (endpoint >= XUSB_MAX_ENDPOINTS) { - xudc_ep0_stall(udc); - return; - } target_ep = &udc->ep[endpoint]; outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK; outinbit = outinbit >> 7; diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c index 0173a9969b9a..73a4dfba0edb 100644 --- a/drivers/usb/gadget/usbstring.c +++ b/drivers/usb/gadget/usbstring.c @@ -59,9 +59,9 @@ usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf) return -EINVAL; /* string descriptors have length, tag, then UTF16-LE text */ - len = min((size_t)USB_MAX_STRING_LEN, strlen(s->s)); + len = min ((size_t) 126, strlen (s->s)); len = utf8s_to_utf16s(s->s, len, UTF16_LITTLE_ENDIAN, - (wchar_t *) &buf[2], USB_MAX_STRING_LEN); + (wchar_t *) &buf[2], 126); if (len < 0) return -EINVAL; buf [0] = (len + 1) * 2; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 56a32d4e2cbc..3e51c0a51049 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -575,7 +575,6 @@ static int ehci_run (struct usb_hcd *hcd) struct ehci_hcd *ehci = hcd_to_ehci (hcd); u32 temp; u32 hcc_params; - int rc; hcd->uses_new_polling = 1; @@ -631,20 +630,9 @@ 
static int ehci_run (struct usb_hcd *hcd) down_write(&ehci_cf_port_reset_rwsem); ehci->rh_state = EHCI_RH_RUNNING; ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); - - /* Wait until HC become operational */ ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ msleep(5); - rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000); - up_write(&ehci_cf_port_reset_rwsem); - - if (rc) { - ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n", - ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc); - return rc; - } - ehci->last_periodic_enable = ktime_get_real(); temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index 8e0625cc1801..cccde8217f28 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -237,7 +237,6 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) err_pm_runtime: pm_runtime_put_sync(dev); - pm_runtime_disable(dev); err_phy: for (i = 0; i < omap->nports; i++) { diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c index 3eecf47d4e89..ee8d5faa0194 100644 --- a/drivers/usb/host/ehci-orion.c +++ b/drivers/usb/host/ehci-orion.c @@ -218,11 +218,8 @@ static int ehci_orion_drv_probe(struct platform_device *pdev) * the clock does not exists. 
*/ priv->clk = devm_clk_get(&pdev->dev, NULL); - if (!IS_ERR(priv->clk)) { - err = clk_prepare_enable(priv->clk); - if (err) - goto err_put_hcd; - } + if (!IS_ERR(priv->clk)) + clk_prepare_enable(priv->clk); priv->phy = devm_phy_optional_get(&pdev->dev, "usb"); if (IS_ERR(priv->phy)) { @@ -283,7 +280,6 @@ err_phy_init: err_phy_get: if (!IS_ERR(priv->clk)) clk_disable_unprepare(priv->clk); -err_put_hcd: usb_put_hcd(hcd); err: dev_err(&pdev->dev, "init %s fail, %d\n", diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index d1e2667918e1..33e9ed7fe026 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -312,9 +312,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd) if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST) ; /* ConneXT has no sbrn register */ - else if (pdev->vendor == PCI_VENDOR_ID_HUAWEI - && pdev->device == 0xa239) - ; /* HUAWEI Kunpeng920 USB EHCI has no sbrn register */ else pci_read_config_byte(pdev, 0x60, &ehci->sbrn); diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c index e081392ec830..11b3a8c57eab 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/host/fotg210-hcd.c @@ -4495,12 +4495,13 @@ static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd) /* HC need not update length with this error */ if (!(t & FOTG210_ISOC_BABBLE)) { - desc->actual_length = FOTG210_ITD_LENGTH(t); + desc->actual_length = + fotg210_itdlen(urb, desc, t); urb->actual_length += desc->actual_length; } } else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) { desc->status = 0; - desc->actual_length = FOTG210_ITD_LENGTH(t); + desc->actual_length = fotg210_itdlen(urb, desc, t); urb->actual_length += desc->actual_length; } else { /* URB was too late */ @@ -5609,7 +5610,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev) struct usb_hcd *hcd; struct resource *res; int irq; - int retval; + int retval = -ENODEV; struct fotg210_hcd 
*fotg210; if (usb_disabled()) @@ -5629,7 +5630,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev) hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev, dev_name(dev)); if (!hcd) { - dev_err(dev, "failed to create hcd\n"); + dev_err(dev, "failed to create hcd with err %d\n", retval); retval = -ENOMEM; goto fail_create_hcd; } diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h index 1a3f94123c88..b5cfa7aeb277 100644 --- a/drivers/usb/host/fotg210.h +++ b/drivers/usb/host/fotg210.h @@ -682,6 +682,11 @@ static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210) return fotg210_readl(fotg210, &fotg210->regs->frame_index); } +#define fotg210_itdlen(urb, desc, t) ({ \ + usb_pipein((urb)->pipe) ? \ + (desc)->length - FOTG210_ITD_LENGTH(t) : \ + FOTG210_ITD_LENGTH(t); \ +}) /*-------------------------------------------------------------------------*/ #endif /* __LINUX_FOTG210_H */ diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index f30e661129c2..bd98706d1ce9 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -121,6 +121,8 @@ struct max3421_hcd { struct task_struct *spi_thread; + struct max3421_hcd *next; + enum max3421_rh_state rh_state; /* lower 16 bits contain port status, upper 16 bits the change mask: */ u32 port_status; @@ -147,6 +149,8 @@ struct max3421_hcd { */ struct urb *curr_urb; enum scheduling_pass sched_pass; + struct usb_device *loaded_dev; /* dev that's loaded into the chip */ + int loaded_epnum; /* epnum whose toggles are loaded */ int urb_done; /* > 0 -> no errors, < 0: errno */ size_t curr_len; u8 hien; @@ -168,6 +172,8 @@ struct max3421_ep { u8 retransmit; /* packet needs retransmission */ }; +static struct max3421_hcd *max3421_hcd_list; + #define MAX3421_FIFO_SIZE 64 #define MAX3421_SPI_DIR_RD 0 /* read register from MAX3421 */ @@ -482,17 +488,39 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev) * Caller must NOT hold HCD 
spinlock. */ static void -max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum) +max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, + int force_toggles) { - int rcvtog, sndtog; + struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); + int old_epnum, same_ep, rcvtog, sndtog; + struct usb_device *old_dev; u8 hctl; + old_dev = max3421_hcd->loaded_dev; + old_epnum = max3421_hcd->loaded_epnum; + + same_ep = (dev == old_dev && epnum == old_epnum); + if (same_ep && !force_toggles) + return; + + if (old_dev && !same_ep) { + /* save the old end-points toggles: */ + u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); + + rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; + sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; + + /* no locking: HCD (i.e., we) own toggles, don't we? */ + usb_settoggle(old_dev, old_epnum, 0, rcvtog); + usb_settoggle(old_dev, old_epnum, 1, sndtog); + } /* setup new endpoint's toggle bits: */ rcvtog = usb_gettoggle(dev, epnum, 0); sndtog = usb_gettoggle(dev, epnum, 1); hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) | BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT)); + max3421_hcd->loaded_epnum = epnum; spi_wr8(hcd, MAX3421_REG_HCTL, hctl); /* @@ -500,6 +528,7 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum) * address-assignment so it's best to just always load the * address whenever the end-point changed/was forced. 
*/ + max3421_hcd->loaded_dev = dev; spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum); } @@ -634,7 +663,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd) struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); struct urb *urb, *curr_urb = NULL; struct max3421_ep *max3421_ep; - int epnum; + int epnum, force_toggles = 0; struct usb_host_endpoint *ep; struct list_head *pos; unsigned long flags; @@ -744,6 +773,7 @@ done: usb_settoggle(urb->dev, epnum, 0, 1); usb_settoggle(urb->dev, epnum, 1, 1); max3421_ep->pkt_state = PKT_STATE_SETUP; + force_toggles = 1; } else max3421_ep->pkt_state = PKT_STATE_TRANSFER; } @@ -751,7 +781,7 @@ done: spin_unlock_irqrestore(&max3421_hcd->lock, flags); max3421_ep->last_active = max3421_hcd->frame_number; - max3421_set_address(hcd, urb->dev, epnum); + max3421_set_address(hcd, urb->dev, epnum, force_toggles); max3421_set_speed(hcd, urb->dev); max3421_next_transfer(hcd, 0); return 1; @@ -1352,16 +1382,6 @@ max3421_urb_done(struct usb_hcd *hcd) status = 0; urb = max3421_hcd->curr_urb; if (urb) { - /* save the old end-points toggles: */ - u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); - int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; - int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; - int epnum = usb_endpoint_num(&urb->ep->desc); - - /* no locking: HCD (i.e., we) own toggles, don't we? 
*/ - usb_settoggle(urb->dev, epnum, 0, rcvtog); - usb_settoggle(urb->dev, epnum, 1, sndtog); - max3421_hcd->curr_urb = NULL; spin_lock_irqsave(&max3421_hcd->lock, flags); usb_hcd_unlink_urb_from_ep(hcd, urb); @@ -1837,8 +1857,9 @@ max3421_probe(struct spi_device *spi) } set_bit(HCD_FLAG_POLL_RH, &hcd->flags); max3421_hcd = hcd_to_max3421(hcd); + max3421_hcd->next = max3421_hcd_list; + max3421_hcd_list = max3421_hcd; INIT_LIST_HEAD(&max3421_hcd->ep_list); - spi_set_drvdata(spi, max3421_hcd); max3421_hcd->tx = kmalloc(sizeof(*max3421_hcd->tx), GFP_KERNEL); if (!max3421_hcd->tx) { @@ -1887,18 +1908,28 @@ error: static int max3421_remove(struct spi_device *spi) { - struct max3421_hcd *max3421_hcd; - struct usb_hcd *hcd; + struct max3421_hcd *max3421_hcd = NULL, **prev; + struct usb_hcd *hcd = NULL; unsigned long flags; - max3421_hcd = spi_get_drvdata(spi); - hcd = max3421_to_hcd(max3421_hcd); + for (prev = &max3421_hcd_list; *prev; prev = &(*prev)->next) { + max3421_hcd = *prev; + hcd = max3421_to_hcd(max3421_hcd); + if (hcd->self.controller == &spi->dev) + break; + } + if (!max3421_hcd) { + dev_err(&spi->dev, "no MAX3421 HCD found for SPI device %p\n", + spi); + return -ENODEV; + } usb_remove_hcd(hcd); spin_lock_irqsave(&max3421_hcd->lock, flags); kthread_stop(max3421_hcd->spi_thread); + *prev = max3421_hcd->next; spin_unlock_irqrestore(&max3421_hcd->lock, flags); diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 5137b1d5b312..d34b15bf0f41 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -99,7 +99,7 @@ static void io_watchdog_func(unsigned long _ohci); /* Some boards misreport power switching/overcurrent */ -static bool distrust_firmware; +static bool distrust_firmware = 1; module_param (distrust_firmware, bool, 0); MODULE_PARM_DESC (distrust_firmware, "true to distrust firmware power/overcurrent setup"); diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c index 4d42ae3b2fd6..cfcfadfc94fc 
100644 --- a/drivers/usb/host/ohci-tmio.c +++ b/drivers/usb/host/ohci-tmio.c @@ -199,12 +199,9 @@ static int ohci_hcd_tmio_drv_probe(struct platform_device *dev) if (usb_disabled()) return -ENODEV; - if (!cell || !regs || !config || !sram) + if (!cell) return -EINVAL; - if (irq < 0) - return irq; - hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev_name(&dev->dev)); if (!hcd) { ret = -ENOMEM; diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index d1e0d9d4e7a6..1f139d82cee0 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -3741,10 +3741,8 @@ static struct usb_hcd *oxu_create(struct platform_device *pdev, oxu->is_otg = otg; ret = usb_add_hcd(hcd, irq, IRQF_SHARED); - if (ret < 0) { - usb_put_hcd(hcd); + if (ret < 0) return ERR_PTR(ret); - } device_wakeup_enable(hcd->self.controller); return hcd; diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index 455c59fe32fa..fd2a11473be7 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -1286,10 +1286,11 @@ sl811h_hub_control( goto error; put_unaligned_le32(sl811->port1, buf); - if (__is_defined(VERBOSE) || - *(u16*)(buf+2)) /* only if wPortChange is interesting */ - dev_dbg(hcd->self.controller, "GetPortStatus %08x\n", - sl811->port1); +#ifndef VERBOSE + if (*(u16*)(buf+2)) /* only if wPortChange is interesting */ +#endif + dev_dbg(hcd->self.controller, "GetPortStatus %08x\n", + sl811->port1); break; case SetPortFeature: if (wIndex != 1 || wLength != 0) diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h index 35ac83e86c94..9fe3225e6c61 100644 --- a/drivers/usb/host/xhci-ext-caps.h +++ b/drivers/usb/host/xhci-ext-caps.h @@ -19,9 +19,8 @@ * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ - -/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */ -#define XHCI_MAX_HALT_USEC (32 * 1000) +/* Up to 16 ms to halt an HC */ +#define XHCI_MAX_HALT_USEC (16*1000) /* HC not running - set to 1 when run/stop bit is cleared. */ #define XHCI_STS_HALT (1<<0) diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 7029578ac09f..78d36322b64c 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -156,6 +156,7 @@ static void xhci_common_hub_descriptor(struct xhci_hcd *xhci, { u16 temp; + desc->bPwrOn2PwrGood = 10; /* xhci section 5.4.9 says 20ms max */ desc->bHubContrCurrent = 0; desc->bNbrPorts = ports; @@ -189,7 +190,6 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci, desc->bDescriptorType = USB_DT_HUB; temp = 1 + (ports / 8); desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * temp; - desc->bPwrOn2PwrGood = 10; /* xhci section 5.4.8 says 20ms */ /* The Device Removable bits are reported on a byte granularity. * If the port doesn't exist within that byte, the bit is set to 0. @@ -240,7 +240,6 @@ static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci, xhci_common_hub_descriptor(xhci, desc, ports); desc->bDescriptorType = USB_DT_SS_HUB; desc->bDescLength = USB_DT_SS_HUB_SIZE; - desc->bPwrOn2PwrGood = 50; /* usb 3.1 may fail if less than 100ms */ /* header decode latency should be zero for roothubs, * see section 4.23.5.2. @@ -1478,12 +1477,11 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) * Inform the usbcore about resume-in-progress by returning * a non-zero value even if there are no status changes. */ - spin_lock_irqsave(&xhci->lock, flags); - status = bus_state->resuming_ports; mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC; + spin_lock_irqsave(&xhci->lock, flags); /* For each port, did anything change? If so, set that bit in buf. 
*/ for (i = 0; i < max_ports; i++) { temp = readl(port_array[i]); @@ -1611,10 +1609,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd) hcd->state = HC_STATE_SUSPENDED; bus_state->next_statechange = jiffies + msecs_to_jiffies(10); spin_unlock_irqrestore(&xhci->lock, flags); - - if (bus_state->bus_suspended) - usleep_range(5000, 10000); - return 0; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 2fb202da44b8..5af57afb4e56 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -38,7 +38,6 @@ #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009 -#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 0x1100 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 #define PCI_VENDOR_ID_ETRON 0x1b6f @@ -125,10 +124,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009) xhci->quirks |= XHCI_BROKEN_STREAMS; - if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && - pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100) - xhci->quirks |= XHCI_TRUST_TX_LENGTH; - if (pdev->vendor == PCI_VENDOR_ID_NEC) xhci->quirks |= XHCI_NEC_HOST; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 9d2cc0de92e1..4c724ae7c7b2 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2831,8 +2831,6 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, trb->field[0] = cpu_to_le32(field1); trb->field[1] = cpu_to_le32(field2); trb->field[2] = cpu_to_le32(field3); - /* make sure TRB is fully written before giving it to the controller */ - wmb(); trb->field[3] = cpu_to_le32(field4); inc_enq(xhci, ring, more_trbs_coming); } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 13fb911d67c5..3dadc9634dd0 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1062,7 +1062,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) 
struct usb_hcd *secondary_hcd; int retval = 0; bool comp_timer_running = false; - bool pending_portevent = false; if (!hcd->state || !xhci->suspended) return 0; @@ -1196,22 +1195,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) done: if (retval == 0) { - /* - * Resume roothubs only if there are pending events. - * USB 3 devices resend U3 LFPS wake after a 100ms delay if - * the first wake signalling failed, give it that chance. - */ - pending_portevent = xhci_pending_portevent(xhci); - if (!pending_portevent) { - msleep(120); - pending_portevent = xhci_pending_portevent(xhci); - } - - if (pending_portevent) { + /* Resume root hubs only when have pending events. */ + if (xhci_pending_portevent(xhci)) { usb_hcd_resume_root_hub(xhci->shared_hcd); usb_hcd_resume_root_hub(hcd); } } + /* * If system is subject to the Quirk, Compliance Mode Timer needs to * be re-initialized Always after a system resume. Ports are subject @@ -1347,7 +1337,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, * we need to issue an evaluate context command and wait on it. */ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, - unsigned int ep_index, struct urb *urb, gfp_t mem_flags) + unsigned int ep_index, struct urb *urb) { struct xhci_container_ctx *out_ctx; struct xhci_input_control_ctx *ctrl_ctx; @@ -1378,7 +1368,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, * changes max packet sizes. */ - command = xhci_alloc_command(xhci, false, true, mem_flags); + command = xhci_alloc_command(xhci, false, true, GFP_KERNEL); if (!command) return -ENOMEM; @@ -1435,12 +1425,9 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) struct urb_priv *urb_priv; int size, i; - if (!urb) + if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, + true, true, __func__) <= 0) return -EINVAL; - ret = xhci_check_args(hcd, urb->dev, urb->ep, - true, true, __func__); - if (ret <= 0) - return ret ? 
ret : -EINVAL; slot_id = urb->dev->slot_id; ep_index = xhci_get_endpoint_index(&urb->ep->desc); @@ -1488,7 +1475,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) */ if (urb->dev->speed == USB_SPEED_FULL) { ret = xhci_check_maxpacket(xhci, slot_id, - ep_index, urb, mem_flags); + ep_index, urb); if (ret < 0) { xhci_urb_free_priv(urb_priv); urb->hcpriv = NULL; @@ -3101,7 +3088,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, return -EINVAL; ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); if (ret <= 0) - return ret ? ret : -EINVAL; + return -EINVAL; if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" " descriptor for ep 0x%x does not support streams\n", diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index 6f6315082bc4..52c27cab78c3 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c @@ -209,7 +209,6 @@ static void ftdi_elan_delete(struct kref *kref) mutex_unlock(&ftdi_module_lock); kfree(ftdi->bulk_in_buffer); ftdi->bulk_in_buffer = NULL; - kfree(ftdi); } static void ftdi_elan_put_kref(struct usb_ftdi *ftdi) diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index f4e9592c02d6..83342e579233 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -96,6 +96,10 @@ struct iowarrior { /* globals */ /*--------------*/ +/* + * USB spec identifies 5 second timeouts. 
+ */ +#define GET_TIMEOUT 5 #define USB_REQ_GET_REPORT 0x01 //#if 0 static int usb_get_report(struct usb_device *dev, @@ -107,7 +111,7 @@ static int usb_get_report(struct usb_device *dev, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, (type << 8) + id, inter->desc.bInterfaceNumber, buf, size, - USB_CTRL_GET_TIMEOUT); + GET_TIMEOUT*HZ); } //#endif @@ -122,7 +126,7 @@ static int usb_set_report(struct usb_interface *intf, unsigned char type, USB_TYPE_CLASS | USB_RECIP_INTERFACE, (type << 8) + id, intf->cur_altsetting->desc.bInterfaceNumber, buf, - size, 1000); + size, HZ); } /*---------------------*/ diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig index 47dabccafef4..36bc28c884ad 100644 --- a/drivers/usb/misc/sisusbvga/Kconfig +++ b/drivers/usb/misc/sisusbvga/Kconfig @@ -15,7 +15,7 @@ config USB_SISUSBVGA config USB_SISUSBVGA_CON bool "Text console and mode switching support" if USB_SISUSBVGA - depends on VT && BROKEN + depends on VT select FONT_8x16 ---help--- Say Y here if you want a VGA text console via the USB dongle or diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c index bd256ede6149..4145314a515b 100644 --- a/drivers/usb/misc/trancevibrator.c +++ b/drivers/usb/misc/trancevibrator.c @@ -74,9 +74,9 @@ static ssize_t set_speed(struct device *dev, struct device_attribute *attr, /* Set speed */ retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0), 0x01, /* vendor request: set speed */ - USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER, tv->speed, /* speed value */ - 0, NULL, 0, USB_CTRL_SET_TIMEOUT); + 0, NULL, 0, USB_CTRL_GET_TIMEOUT); if (retval) { tv->speed = old; dev_dbg(&tv->udev->dev, "retval = %d\n", retval); diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 9a4138da3b88..3d750671b85a 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -753,7 +753,6 @@ static int 
uss720_probe(struct usb_interface *intf, parport_announce_port(pp); usb_set_intfdata(intf, pp); - usb_put_dev(usbdev); return 0; probe_abort: diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index e7761c00eda5..b17aeaafbb7c 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -515,9 +515,6 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, timeout = schedule_timeout(YUREX_WRITE_TIMEOUT); finish_wait(&dev->waitq, &wait); - /* make sure URB is idle after timeout or (spurious) CMD_ACK */ - usb_kill_urb(dev->cntl_urb); - mutex_unlock(&dev->io_mutex); if (retval < 0) { diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index f4297e549595..df7c9f46be54 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -193,7 +193,6 @@ tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len) } if (len > 0) { /* Write the rest 1 - 3 bytes to FIFO */ - val = 0; memcpy(&val, buf, len); musb_writel(fifo, 0, val); } @@ -1120,11 +1119,6 @@ static int tusb_musb_init(struct musb *musb) /* dma address for async dma */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) { - pr_debug("no async dma resource?\n"); - ret = -ENODEV; - goto done; - } musb->async = mem->start; /* dma address for sync dma */ diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c index 63798de8b5ae..85d031ce85c1 100644 --- a/drivers/usb/phy/phy-fsl-usb.c +++ b/drivers/usb/phy/phy-fsl-usb.c @@ -891,8 +891,6 @@ int usb_otg_start(struct platform_device *pdev) /* request irq */ p_otg->irq = platform_get_irq(pdev, 0); - if (p_otg->irq < 0) - return p_otg->irq; status = request_irq(p_otg->irq, fsl_otg_isr, IRQF_SHARED, driver_name, p_otg); if (status) { diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c index f333024660b4..b3b33cf7ddf6 100644 --- a/drivers/usb/phy/phy-isp1301.c +++ b/drivers/usb/phy/phy-isp1301.c @@ -136,7 +136,7 @@ static int 
isp1301_remove(struct i2c_client *client) static struct i2c_driver isp1301_driver = { .driver = { .name = DRV_NAME, - .of_match_table = isp1301_of_match, + .of_match_table = of_match_ptr(isp1301_of_match), }, .probe = isp1301_probe, .remove = isp1301_remove, diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c index ec86eedd789b..335a1ef35224 100644 --- a/drivers/usb/phy/phy-tahvo.c +++ b/drivers/usb/phy/phy-tahvo.c @@ -404,9 +404,7 @@ static int tahvo_usb_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, tu); - tu->irq = ret = platform_get_irq(pdev, 0); - if (ret < 0) - return ret; + tu->irq = platform_get_irq(pdev, 0); ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, IRQF_ONESHOT, "tahvo-vbus", tu); diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c index 220e1a59a871..12741856a75c 100644 --- a/drivers/usb/phy/phy-twl6030-usb.c +++ b/drivers/usb/phy/phy-twl6030-usb.c @@ -336,11 +336,6 @@ static int twl6030_usb_probe(struct platform_device *pdev) twl->irq2 = platform_get_irq(pdev, 1); twl->linkstat = OMAP_MUSB_UNKNOWN; - if (twl->irq1 < 0) - return twl->irq1; - if (twl->irq2 < 0) - return twl->irq2; - twl->comparator.set_vbus = twl6030_set_vbus; twl->comparator.start_srp = twl6030_start_srp; diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 793bd764385a..79efb367e5ce 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -115,8 +115,6 @@ static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo, #define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1) #define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0) static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map); -static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable); -static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable); struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) { struct usbhs_priv 
*priv = usbhs_pipe_to_priv(pipe); @@ -140,15 +138,8 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) dmaengine_terminate_all(chan); usbhsf_fifo_clear(pipe, fifo); usbhsf_dma_unmap(pkt); - } else { - if (usbhs_pipe_is_dir_in(pipe)) - usbhsf_rx_irq_ctrl(pipe, 0); - else - usbhsf_tx_irq_ctrl(pipe, 0); } - usbhs_pipe_running(pipe, 0); - __usbhsf_pkt_del(pkt); } diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c index 22249e389c3d..75fb41d4e9fc 100644 --- a/drivers/usb/renesas_usbhs/pipe.c +++ b/drivers/usb/renesas_usbhs/pipe.c @@ -805,8 +805,6 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv, void usbhs_pipe_free(struct usbhs_pipe *pipe) { - usbhsp_pipe_select(pipe); - usbhsp_pipe_cfg_set(pipe, 0xFFFF, 0); usbhsp_put_pipe(pipe); } diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 6d72a3e9352e..3ec59c2b4f65 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -70,12 +70,10 @@ static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x1a86, 0x5523) }, + { USB_DEVICE(0x4348, 0x5523) }, { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, - { USB_DEVICE(0x2184, 0x0057) }, - { USB_DEVICE(0x4348, 0x5523) }, - { USB_DEVICE(0x9986, 0x7523) }, + { USB_DEVICE(0x1a86, 0x5523) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 120d38798e4d..205f31200264 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -48,7 +48,6 @@ static void cp210x_release(struct usb_serial *); static void cp210x_dtr_rts(struct usb_serial_port *p, int on); static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x0404, 0x034C) }, /* NCR Retail IO Box */ { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */ { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ @@ 
-58,7 +57,6 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ - { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */ { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ @@ -66,7 +64,6 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */ - { USB_DEVICE(0x106F, 0x0003) }, /* CPI / Money Controls Bulk Coin Recycler */ { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ @@ -144,7 +141,6 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ - { USB_DEVICE(0x10C4, 0x88D8) }, /* Acuity Brands nLight Air Adapter */ { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */ { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ @@ -154,7 +150,6 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */ { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ - { USB_DEVICE(0x10C4, 0x8A5B) }, /* 
CEL EM3588 ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ @@ -202,9 +197,6 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ - { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 M.2 Key E serial interface */ - { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 Display serial interface */ - { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ @@ -232,7 +224,6 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */ { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */ { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */ - { USB_DEVICE(0x2184, 0x0030) }, /* GW Instek GDM-834x Digital Multimeter */ { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ @@ -261,8 +252,6 @@ static struct usb_serial_driver cp210x_device = { .close = cp210x_close, .break_ctl = cp210x_break_ctl, .set_termios = cp210x_set_termios, - .throttle = usb_serial_generic_throttle, - .unthrottle = usb_serial_generic_unthrottle, .tiocmget = cp210x_tiocmget, .tiocmset = cp210x_tiocmset, .attach = cp210x_startup, diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index b630048c4988..be93b9ff2d98 100644 --- 
a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -200,12 +201,14 @@ struct digi_port { int dp_throttle_restart; wait_queue_head_t dp_flush_wait; wait_queue_head_t dp_close_wait; /* wait queue for close */ + struct work_struct dp_wakeup_work; struct usb_serial_port *dp_port; }; /* Local Function Declarations */ +static void digi_wakeup_write_lock(struct work_struct *work); static int digi_write_oob_command(struct usb_serial_port *port, unsigned char *buf, int count, int interruptible); static int digi_write_inb_command(struct usb_serial_port *port, @@ -352,6 +355,26 @@ __releases(lock) return timeout; } + +/* + * Digi Wakeup Write + * + * Wake up port, line discipline, and tty processes sleeping + * on writes. + */ + +static void digi_wakeup_write_lock(struct work_struct *work) +{ + struct digi_port *priv = + container_of(work, struct digi_port, dp_wakeup_work); + struct usb_serial_port *port = priv->dp_port; + unsigned long flags; + + spin_lock_irqsave(&priv->dp_port_lock, flags); + tty_port_tty_wakeup(&port->port); + spin_unlock_irqrestore(&priv->dp_port_lock, flags); +} + /* * Digi Write OOB Command * @@ -963,7 +986,6 @@ static void digi_write_bulk_callback(struct urb *urb) struct digi_serial *serial_priv; int ret = 0; int status = urb->status; - bool wakeup; /* port and serial sanity check */ if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) { @@ -990,7 +1012,6 @@ static void digi_write_bulk_callback(struct urb *urb) } /* try to send any buffered data on this port */ - wakeup = true; spin_lock(&priv->dp_port_lock); priv->dp_write_urb_in_use = 0; if (priv->dp_out_buf_len > 0) { @@ -1006,18 +1027,19 @@ static void digi_write_bulk_callback(struct urb *urb) if (ret == 0) { priv->dp_write_urb_in_use = 1; priv->dp_out_buf_len = 0; - wakeup = false; } } - spin_unlock(&priv->dp_port_lock); + /* wake up processes sleeping on writes 
immediately */ + tty_port_tty_wakeup(&port->port); + /* also queue up a wakeup at scheduler time, in case we */ + /* lost the race in write_chan(). */ + schedule_work(&priv->dp_wakeup_work); + spin_unlock(&priv->dp_port_lock); if (ret && ret != -EPERM) dev_err_console(port, "%s: usb_submit_urb failed, ret=%d, port=%d\n", __func__, ret, priv->dp_port_num); - - if (wakeup) - tty_port_tty_wakeup(&port->port); } static int digi_write_room(struct tty_struct *tty) @@ -1217,6 +1239,7 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num) init_waitqueue_head(&priv->dp_transmit_idle_wait); init_waitqueue_head(&priv->dp_flush_wait); init_waitqueue_head(&priv->dp_close_wait); + INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock); priv->dp_port = port; init_waitqueue_head(&port->write_wait); @@ -1502,14 +1525,13 @@ static int digi_read_oob_callback(struct urb *urb) rts = tty->termios.c_cflag & CRTSCTS; if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) { - bool wakeup = false; - spin_lock(&priv->dp_port_lock); /* convert from digi flags to termiox flags */ if (val & DIGI_READ_INPUT_SIGNALS_CTS) { priv->dp_modem_signals |= TIOCM_CTS; + /* port must be open to use tty struct */ if (rts) - wakeup = true; + tty_port_tty_wakeup(&port->port); } else { priv->dp_modem_signals &= ~TIOCM_CTS; /* port must be open to use tty struct */ @@ -1528,9 +1550,6 @@ static int digi_read_oob_callback(struct urb *urb) priv->dp_modem_signals &= ~TIOCM_CD; spin_unlock(&priv->dp_port_lock); - - if (wakeup) - tty_port_tty_wakeup(&port->port); } else if (opcode == DIGI_CMD_TRANSMIT_IDLE) { spin_lock(&priv->dp_port_lock); priv->dp_transmit_idle = 1; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index b251168eaeb4..5b42b8d760cb 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -214,7 +214,6 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, { USB_DEVICE(FTDI_VID, 
FTDI_R2000KU_TRUE_RNG) }, { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) }, - { USB_DEVICE(FTDI_VID, FTDI_AUTO_M3_OP_COM_V2_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) }, @@ -607,7 +606,6 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) }, - { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONMX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) }, @@ -964,7 +962,6 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) }, - { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_159_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) }, @@ -973,14 +970,12 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) }, - { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_235_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) }, - { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_320_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) }, { USB_DEVICE(BRAINBOXES_VID, 
BRAINBOXES_US_346_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) }, @@ -1034,9 +1029,6 @@ static const struct usb_device_id id_table_combined[] = { /* Sienna devices */ { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) }, { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) }, - /* IDS GmbH devices */ - { USB_DEVICE(IDS_VID, IDS_SI31A_PID) }, - { USB_DEVICE(IDS_VID, IDS_CM31A_PID) }, /* U-Blox devices */ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) }, { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 006e92d26bab..f3302516a1e4 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -158,9 +158,6 @@ /* Vardaan Enterprises Serial Interface VEUSB422R3 */ #define FTDI_VARDAAN_PID 0xF070 -/* Auto-M3 Ltd. - OP-COM USB V2 - OBD interface Adapter */ -#define FTDI_AUTO_M3_OP_COM_V2_PID 0x4f50 - /* * Xsens Technologies BV products (http://www.xsens.com). */ @@ -583,7 +580,6 @@ #define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ #define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */ #define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */ -#define FTDI_NT_ORIONMX_PID 0x7c93 /* OrionMX */ /* * Synapse Wireless product ids (FTDI_VID) @@ -1505,9 +1501,6 @@ #define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */ #define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */ #define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */ -#define BRAINBOXES_US_159_PID 0x1021 /* US-159 1xRS232 */ -#define BRAINBOXES_US_235_PID 0x1017 /* US-235 1xRS232 */ -#define BRAINBOXES_US_320_PID 0x1019 /* US-320 1xRS422/485 */ #define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */ #define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */ #define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */ @@ -1573,13 +1566,6 @@ 
#define UNJO_VID 0x22B7 #define UNJO_ISODEBUG_V1_PID 0x150D -/* - * IDS GmbH - */ -#define IDS_VID 0x2CAF -#define IDS_SI31A_PID 0x13A2 -#define IDS_CM31A_PID 0x13A3 - /* * U-Blox products (http://www.u-blox.com). */ diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 14ab47f88251..b639d064e5da 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -2966,32 +2966,26 @@ static int edge_startup(struct usb_serial *serial) response = -ENODEV; } - goto error; + usb_free_urb(edge_serial->interrupt_read_urb); + kfree(edge_serial->interrupt_in_buffer); + + usb_free_urb(edge_serial->read_urb); + kfree(edge_serial->bulk_in_buffer); + + kfree(edge_serial); + + return response; } /* start interrupt read for this edgeport this interrupt will * continue as long as the edgeport is connected */ response = usb_submit_urb(edge_serial->interrupt_read_urb, GFP_KERNEL); - if (response) { + if (response) dev_err(ddev, "%s - Error %d submitting control urb\n", __func__, response); - - goto error; - } } return response; - -error: - usb_free_urb(edge_serial->interrupt_read_urb); - kfree(edge_serial->interrupt_in_buffer); - - usb_free_urb(edge_serial->read_urb); - kfree(edge_serial->bulk_in_buffer); - - kfree(edge_serial); - - return response; } diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index ef948285a142..1a966f25b3ef 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -551,29 +551,23 @@ static int iuu_uart_flush(struct usb_serial_port *port) struct device *dev = &port->dev; int i; int status; - u8 *rxcmd; + u8 rxcmd = IUU_UART_RX; struct iuu_private *priv = usb_get_serial_port_data(port); if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0) return -EIO; - rxcmd = kmalloc(1, GFP_KERNEL); - if (!rxcmd) - return -ENOMEM; - - rxcmd[0] = IUU_UART_RX; - for (i = 0; i < 2; i++) { - status = bulk_immediate(port, rxcmd, 1); + status = bulk_immediate(port, &rxcmd, 1); 
if (status != IUU_OPERATION_OK) { dev_dbg(dev, "%s - uart_flush_write error\n", __func__); - goto out_free; + return status; } status = read_immediate(port, &priv->len, 1); if (status != IUU_OPERATION_OK) { dev_dbg(dev, "%s - uart_flush_read error\n", __func__); - goto out_free; + return status; } if (priv->len > 0) { @@ -581,16 +575,12 @@ static int iuu_uart_flush(struct usb_serial_port *port) status = read_immediate(port, priv->buf, priv->len); if (status != IUU_OPERATION_OK) { dev_dbg(dev, "%s - uart_flush_read error\n", __func__); - goto out_free; + return status; } } } dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__); iuu_led(port, 0, 0xF000, 0, 0xFF); - -out_free: - kfree(rxcmd); - return status; } diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index b8f7dca97b98..a79e9adf4e53 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -2417,22 +2417,22 @@ static int keyspan_port_probe(struct usb_serial_port *port) for (i = 0; i < ARRAY_SIZE(p_priv->in_buffer); ++i) { p_priv->in_buffer[i] = kzalloc(IN_BUFLEN, GFP_KERNEL); if (!p_priv->in_buffer[i]) - goto err_free_in_buffer; + goto err_in_buffer; } for (i = 0; i < ARRAY_SIZE(p_priv->out_buffer); ++i) { p_priv->out_buffer[i] = kzalloc(OUT_BUFLEN, GFP_KERNEL); if (!p_priv->out_buffer[i]) - goto err_free_out_buffer; + goto err_out_buffer; } p_priv->inack_buffer = kzalloc(INACK_BUFLEN, GFP_KERNEL); if (!p_priv->inack_buffer) - goto err_free_out_buffer; + goto err_inack_buffer; p_priv->outcont_buffer = kzalloc(OUTCONT_BUFLEN, GFP_KERNEL); if (!p_priv->outcont_buffer) - goto err_free_inack_buffer; + goto err_outcont_buffer; p_priv->device_details = d_details; @@ -2478,14 +2478,15 @@ static int keyspan_port_probe(struct usb_serial_port *port) return 0; -err_free_inack_buffer: +err_outcont_buffer: kfree(p_priv->inack_buffer); -err_free_out_buffer: +err_inack_buffer: for (i = 0; i < ARRAY_SIZE(p_priv->out_buffer); ++i) kfree(p_priv->out_buffer[i]); -err_free_in_buffer: 
+err_out_buffer: for (i = 0; i < ARRAY_SIZE(p_priv->in_buffer); ++i) kfree(p_priv->in_buffer[i]); +err_in_buffer: kfree(p_priv); return -ENOMEM; diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index c40b64244160..8a4047de43dc 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c @@ -44,12 +44,11 @@ #define DRIVER_AUTHOR "Brian Warner " #define DRIVER_DESC "USB Keyspan PDA Converter driver" -#define KEYSPAN_TX_THRESHOLD 16 - struct keyspan_pda_private { int tx_room; int tx_throttled; - struct work_struct unthrottle_work; + struct work_struct wakeup_work; + struct work_struct unthrottle_work; struct usb_serial *serial; struct usb_serial_port *port; }; @@ -102,6 +101,15 @@ static const struct usb_device_id id_table_fake_xircom[] = { }; #endif +static void keyspan_pda_wakeup_write(struct work_struct *work) +{ + struct keyspan_pda_private *priv = + container_of(work, struct keyspan_pda_private, wakeup_work); + struct usb_serial_port *port = priv->port; + + tty_port_tty_wakeup(&port->port); +} + static void keyspan_pda_request_unthrottle(struct work_struct *work) { struct keyspan_pda_private *priv = @@ -116,7 +124,7 @@ static void keyspan_pda_request_unthrottle(struct work_struct *work) 7, /* request_unthrottle */ USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT, - KEYSPAN_TX_THRESHOLD, + 16, /* value: threshold */ 0, /* index */ NULL, 0, @@ -135,8 +143,6 @@ static void keyspan_pda_rx_interrupt(struct urb *urb) int retval; int status = urb->status; struct keyspan_pda_private *priv; - unsigned long flags; - priv = usb_get_serial_port_data(port); switch (status) { @@ -170,21 +176,18 @@ static void keyspan_pda_rx_interrupt(struct urb *urb) break; case 1: /* status interrupt */ - if (len < 2) { + if (len < 3) { dev_warn(&port->dev, "short interrupt message received\n"); break; } - dev_dbg(&port->dev, "rx int, d1=%d\n", data[1]); + dev_dbg(&port->dev, "rx int, d1=%d, d2=%d\n", data[1], data[2]); switch 
(data[1]) { case 1: /* modemline change */ break; case 2: /* tx unthrottle interrupt */ - spin_lock_irqsave(&port->lock, flags); priv->tx_throttled = 0; - priv->tx_room = max(priv->tx_room, KEYSPAN_TX_THRESHOLD); - spin_unlock_irqrestore(&port->lock, flags); /* queue up a wakeup at scheduler time */ - usb_serial_port_softint(port); + schedule_work(&priv->wakeup_work); break; default: break; @@ -444,7 +447,6 @@ static int keyspan_pda_write(struct tty_struct *tty, int request_unthrottle = 0; int rc = 0; struct keyspan_pda_private *priv; - unsigned long flags; priv = usb_get_serial_port_data(port); /* guess how much room is left in the device's ring buffer, and if we @@ -464,13 +466,13 @@ static int keyspan_pda_write(struct tty_struct *tty, the TX urb is in-flight (wait until it completes) the device is full (wait until it says there is room) */ - spin_lock_irqsave(&port->lock, flags); + spin_lock_bh(&port->lock); if (!test_bit(0, &port->write_urbs_free) || priv->tx_throttled) { - spin_unlock_irqrestore(&port->lock, flags); + spin_unlock_bh(&port->lock); return 0; } clear_bit(0, &port->write_urbs_free); - spin_unlock_irqrestore(&port->lock, flags); + spin_unlock_bh(&port->lock); /* At this point the URB is in our control, nobody else can submit it again (the only sudden transition was the one from EINPROGRESS to @@ -516,8 +518,7 @@ static int keyspan_pda_write(struct tty_struct *tty, goto exit; } } - - if (count >= priv->tx_room) { + if (count > priv->tx_room) { /* we're about to completely fill the Tx buffer, so we'll be throttled afterwards. 
*/ count = priv->tx_room; @@ -550,7 +551,7 @@ static int keyspan_pda_write(struct tty_struct *tty, rc = count; exit: - if (rc <= 0) + if (rc < 0) set_bit(0, &port->write_urbs_free); return rc; } @@ -559,29 +560,28 @@ exit: static void keyspan_pda_write_bulk_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; + struct keyspan_pda_private *priv; set_bit(0, &port->write_urbs_free); + priv = usb_get_serial_port_data(port); /* queue up a wakeup at scheduler time */ - usb_serial_port_softint(port); + schedule_work(&priv->wakeup_work); } static int keyspan_pda_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; - struct keyspan_pda_private *priv = usb_get_serial_port_data(port); - unsigned long flags; - int room = 0; - - spin_lock_irqsave(&port->lock, flags); - if (test_bit(0, &port->write_urbs_free) && !priv->tx_throttled) - room = priv->tx_room; - spin_unlock_irqrestore(&port->lock, flags); - - return room; + struct keyspan_pda_private *priv; + priv = usb_get_serial_port_data(port); + /* used by n_tty.c for processing of tabs and such. Giving it our + conservative guess is probably good enough, but needs testing by + running a console through the device. 
*/ + return priv->tx_room; } + static int keyspan_pda_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; @@ -660,12 +660,8 @@ error: } static void keyspan_pda_close(struct usb_serial_port *port) { - struct keyspan_pda_private *priv = usb_get_serial_port_data(port); - usb_kill_urb(port->write_urb); usb_kill_urb(port->interrupt_in_urb); - - cancel_work_sync(&priv->unthrottle_work); } @@ -736,6 +732,7 @@ static int keyspan_pda_port_probe(struct usb_serial_port *port) if (!priv) return -ENOMEM; + INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write); INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle); priv->serial = port->serial; priv->port = port; diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c index 7b006fd4de0e..83c823d32ff9 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c @@ -293,12 +293,12 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) priv->cfg.unknown2 = cfg->unknown2; spin_unlock_irqrestore(&priv->lock, flags); - kfree(cfg); - /* READ_ON and urb submission */ rc = usb_serial_generic_open(tty, port); - if (rc) - return rc; + if (rc) { + retval = rc; + goto err_free_cfg; + } rc = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), @@ -341,6 +341,8 @@ err_disable_read: KLSI_TIMEOUT); err_generic_close: usb_serial_generic_close(port); +err_free_cfg: + kfree(cfg); return retval; } diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 2c85801ffccd..1927f41ccafc 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -229,10 +229,8 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum, int status; buf = kmalloc(1, GFP_KERNEL); - if (!buf) { - *data = 0; + if (!buf) return -ENOMEM; - } status = usb_control_msg(usbdev, pipe, request, requesttype, value, index, buf, 1, MOS_WDR_TIMEOUT); @@ -642,8 +640,6 @@ static void 
parport_mos7715_restore_state(struct parport *pp, spin_unlock(&release_lock); return; } - mos_parport->shadowDCR = s->u.pc.ctr; - mos_parport->shadowECR = s->u.pc.ecr; write_parport_reg_nonblock(mos_parport, MOS7720_DCR, mos_parport->shadowDCR); write_parport_reg_nonblock(mos_parport, MOS7720_ECR, @@ -1241,10 +1237,8 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port, if (urb->transfer_buffer == NULL) { urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_ATOMIC); - if (!urb->transfer_buffer) { - bytes_sent = -ENOMEM; + if (!urb->transfer_buffer) goto exit; - } } transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 8a94c85428d9..4add6bc38d64 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -126,6 +126,7 @@ #define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02 #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 #define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03 +#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24 /* This driver also supports * ATEN UC2324 device using Moschip MCS7840 @@ -206,6 +207,7 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, {USB_DEVICE(USB_VENDOR_ID_MOXA, MOXA_DEVICE_ID_2210)}, @@ -1360,10 +1362,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port, if (urb->transfer_buffer == NULL) { urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_ATOMIC); - if (!urb->transfer_buffer) { - bytes_sent = -ENOMEM; + if (!urb->transfer_buffer) goto exit; - } } transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE); diff --git 
a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c index cc0bf59bd08d..76564b3bebb9 100644 --- a/drivers/usb/serial/omninet.c +++ b/drivers/usb/serial/omninet.c @@ -27,7 +27,6 @@ #define ZYXEL_VENDOR_ID 0x0586 #define ZYXEL_OMNINET_ID 0x1000 -#define ZYXEL_OMNI_56K_PLUS_ID 0x1500 /* This one seems to be a re-branded ZyXEL device */ #define BT_IGNITIONPRO_ID 0x2000 @@ -45,7 +44,6 @@ static int omninet_port_remove(struct usb_serial_port *port); static const struct usb_device_id id_table[] = { { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) }, - { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNI_56K_PLUS_ID) }, { USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index c5d0d9e2bff2..34ac1265afe4 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -201,8 +201,6 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5821E 0x81d7 #define DELL_PRODUCT_5821E_ESIM 0x81e0 -#define DELL_PRODUCT_5829E_ESIM 0x81e4 -#define DELL_PRODUCT_5829E 0x81e6 #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da @@ -243,7 +241,6 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_UC15 0x9090 /* These u-blox products use Qualcomm's vendor ID */ #define UBLOX_PRODUCT_R410M 0x90b2 -#define UBLOX_PRODUCT_R6XX 0x90fa /* These Yuga products use Qualcomm's vendor ID */ #define YUGA_PRODUCT_CLM920_NC5 0x9625 @@ -422,14 +419,11 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_PH8 0x0053 #define CINTERION_PRODUCT_AHXX 0x0055 #define CINTERION_PRODUCT_PLXX 0x0060 -#define CINTERION_PRODUCT_EXS82 0x006c #define CINTERION_PRODUCT_PH8_2RMNET 0x0082 #define CINTERION_PRODUCT_PH8_AUDIO 0x0083 #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 #define CINTERION_PRODUCT_CLS8 0x00b0 -#define CINTERION_PRODUCT_MV31_MBIM 0x00b3 -#define 
CINTERION_PRODUCT_MV31_RMNET 0x00b7 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -568,9 +562,6 @@ static void option_instat_callback(struct urb *urb); /* Device flags */ -/* Highest interface number which can be used with NCTRL() and RSVD() */ -#define FLAG_IFNUM_MAX 7 - /* Interface does not support modem-control requests */ #define NCTRL(ifnum) ((BIT(ifnum) & 0xff) << 8) @@ -1060,10 +1051,6 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM), .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, - { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E), - .driver_info = RSVD(0) | RSVD(6) }, - { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM), - .driver_info = RSVD(0) | RSVD(6) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -1105,8 +1092,6 @@ static const struct usb_device_id option_ids[] = { /* u-blox products using Qualcomm vendor ID */ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), .driver_info = RSVD(1) | RSVD(3) }, - { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX), - .driver_info = RSVD(3) }, /* Quectel products using Quectel vendor ID */ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), .driver_info = RSVD(4) }, @@ -1191,24 +1176,6 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff), /* Telit FN980 (PCIe) */ .driver_info = NCTRL(0) | RSVD(1) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */ - .driver_info = NCTRL(2) | RSVD(3) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */ - .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 
(MBIM) */ - .driver_info = NCTRL(0) | RSVD(1) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1062, 0xff), /* Telit LN920 (RNDIS) */ - .driver_info = NCTRL(2) | RSVD(3) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */ - .driver_info = NCTRL(0) | RSVD(1) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990 (rmnet) */ - .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990 (MBIM) */ - .driver_info = NCTRL(0) | RSVD(1) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990 (RNDIS) */ - .driver_info = NCTRL(2) | RSVD(3) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */ - .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), @@ -1223,8 +1190,6 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff), /* Telit LE910Cx (RNDIS) */ .driver_info = NCTRL(2) | RSVD(3) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1204, 0xff), /* Telit LE910Cx (MBIM) */ - .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), @@ -1251,20 +1216,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */ .driver_info = NCTRL(0) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff), /* Telit LE910-S1 (RNDIS) */ - .driver_info = NCTRL(2) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */ - .driver_info = NCTRL(2) }, - { 
USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */ - .driver_info = NCTRL(2) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */ - .driver_info = NCTRL(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ .driver_info = NCTRL(0) | ZLP }, - { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */ - .driver_info = NCTRL(0) | ZLP }, - { USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */ - .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, @@ -1592,8 +1545,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) }, - { USB_DEVICE(ZTE_VENDOR_ID, 0x1275), /* ZTE P685M */ - .driver_info = RSVD(3) | RSVD(4) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) }, @@ -1637,8 +1589,6 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */ - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff), /* ZTE MF286D */ - .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, @@ -1666,6 +1616,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff), @@ -1934,17 +1885,12 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff), .driver_info = RSVD(0) | RSVD(4) }, - { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EXS82, 0xff) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, - { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff), - .driver_info = RSVD(3)}, - { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), - .driver_info = RSVD(0)}, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), .driver_info = RSVD(4) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), @@ -2085,21 +2031,12 @@ static const struct usb_device_id option_ids[] = { 
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, - { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */ - .driver_info = RSVD(3) }, - { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */ + { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */ .driver_info = RSVD(4) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ .driver_info = RSVD(6) }, - { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */ - { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */ - { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ - { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */ - { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */ - .driver_info = RSVD(4) }, - { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ @@ -2146,14 +2083,6 @@ static struct usb_serial_driver * const serial_drivers[] = { module_usb_serial_driver(serial_drivers, option_ids); -static bool iface_is_reserved(unsigned long device_flags, u8 ifnum) -{ - if (ifnum > FLAG_IFNUM_MAX) - return false; - - return device_flags & RSVD(ifnum); -} - static int option_probe(struct usb_serial *serial, const struct usb_device_id *id) { @@ -2171,7 +2100,7 @@ static int option_probe(struct usb_serial *serial, * the same class/subclass/protocol as the serial interfaces. 
Look at * the Windows driver .INF files for reserved interface numbers. */ - if (iface_is_reserved(device_flags, iface_desc->bInterfaceNumber)) + if (device_flags & RSVD(iface_desc->bInterfaceNumber)) return -ENODEV; /* * Don't bind network interface on Samsung GT-B3730, it is handled by @@ -2188,14 +2117,6 @@ static int option_probe(struct usb_serial *serial, return 0; } -static bool iface_no_modem_control(unsigned long device_flags, u8 ifnum) -{ - if (ifnum > FLAG_IFNUM_MAX) - return false; - - return device_flags & NCTRL(ifnum); -} - static int option_attach(struct usb_serial *serial) { struct usb_interface_descriptor *iface_desc; @@ -2211,7 +2132,7 @@ static int option_attach(struct usb_serial *serial) iface_desc = &serial->interface->cur_altsetting->desc; - if (!iface_no_modem_control(device_flags, iface_desc->bInterfaceNumber)) + if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber))) data->use_send_setup = 1; if (device_flags & ZLP) diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 3dd0bbb36dd2..bf5533d6d83b 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -102,7 +102,6 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, - { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) }, { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) }, { } /* Terminating entry */ diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 62b8cd673aa1..9d27c076f477 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -156,7 +156,6 @@ /* ADLINK ND-6530 RS232,RS485 and RS422 adapter */ #define ADLINK_VENDOR_ID 0x0b63 #define ADLINK_ND6530_PRODUCT_ID 0x6530 -#define ADLINK_ND6530GC_PRODUCT_ID 0x653a /* SMART USB Serial Adapter */ #define SMART_VENDOR_ID 0x0b8c diff 
--git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index f0bd6a66f551..11fb4d78e2db 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -169,7 +169,6 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */ {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */ {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */ - {DEVICE_SWI(0x1199, 0x90d2)}, /* Sierra Wireless EM9191 QDL */ {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index e548a6094d22..82f28192694f 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c @@ -419,7 +419,7 @@ static void qt2_close(struct usb_serial_port *port) /* flush the port transmit buffer */ i = usb_control_msg(serial->dev, - usb_sndctrlpipe(serial->dev, 0), + usb_rcvctrlpipe(serial->dev, 0), QT2_FLUSH_DEVICE, 0x40, 1, port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT); @@ -429,7 +429,7 @@ static void qt2_close(struct usb_serial_port *port) /* flush the port receive buffer */ i = usb_control_msg(serial->dev, - usb_sndctrlpipe(serial->dev, 0), + usb_rcvctrlpipe(serial->dev, 0), QT2_FLUSH_DEVICE, 0x40, 0, port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT); @@ -701,7 +701,7 @@ static int qt2_attach(struct usb_serial *serial) int status; /* power on unit */ - status = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), + status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 0xc2, 0x40, 0x8000, 0, NULL, 0, QT2_USB_TIMEOUT); if (status < 0) { diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 3357ceef0b01..2c2ac15d2554 100644 --- 
a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -310,16 +310,6 @@ UNUSUAL_DEV( 0x045e, 0xffff, 0x0000, 0x0000, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_MAX_SECTORS_64 ), -/* - * Reported by James Buren - * Virtual ISOs cannot be remounted if ejected while the device is locked - * Disable locking to mimic Windows behavior that bypasses the issue - */ -UNUSUAL_DEV( 0x04c5, 0x2028, 0x0001, 0x0001, - "iODD", - "2531/2541", - USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE), - /* * This virtual floppy is found in Sun equipment (x4600, x4200m2, etc.) * Reported by Pete Zaitcev @@ -2155,16 +2145,6 @@ UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, US_FL_SCM_MULT_TARG ), -/* - * Reported by DocMAX - * and Thomas Weißschuh - */ -UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999, - "VIA Labs, Inc.", - "VL817 SATA Bridge", - USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_IGNORE_UAS), - UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001, "ST", "2A", diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 648130903b03..b7171c19fca2 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -54,13 +54,6 @@ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_REPORT_OPCODES), -/* Reported-by: Julian Sikorski */ -UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999, - "LaCie", - "Rugged USB3-FW", - USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_IGNORE_UAS), - /* * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI * commands in UAS mode. Observed with the 1.28 firmware; are there others? 
@@ -169,13 +162,6 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA), -/* Reported-by: Thinh Nguyen */ -UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999, - "PNY", - "Pro Elite SSD", - USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_NO_ATA_1X), - /* Reported-by: Thinh Nguyen */ UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999, "PNY", diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 5501eb89f395..4aad99a59958 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -60,8 +60,6 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, int sockfd = 0; struct socket *socket; int rv; - struct task_struct *tcp_rx = NULL; - struct task_struct *tcp_tx = NULL; if (!sdev) { dev_err(dev, "sdev is null\n"); @@ -85,47 +83,23 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, } socket = sockfd_lookup(sockfd, &err); - if (!socket) { - dev_err(dev, "failed to lookup sock"); + if (!socket) goto err; - } - if (socket->type != SOCK_STREAM) { - dev_err(dev, "Expecting SOCK_STREAM - found %d", - socket->type); - goto sock_err; - } + sdev->ud.tcp_socket = socket; + sdev->ud.sockfd = sockfd; - /* unlock and create threads and get tasks */ spin_unlock_irq(&sdev->ud.lock); - tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx"); - if (IS_ERR(tcp_rx)) { - sockfd_put(socket); - return -EINVAL; - } - tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx"); - if (IS_ERR(tcp_tx)) { - kthread_stop(tcp_rx); - sockfd_put(socket); - return -EINVAL; - } - /* get task structs now */ - get_task_struct(tcp_rx); - get_task_struct(tcp_tx); + sdev->ud.tcp_rx = kthread_get_run(stub_rx_loop, &sdev->ud, + "stub_rx"); + sdev->ud.tcp_tx = kthread_get_run(stub_tx_loop, &sdev->ud, + "stub_tx"); - /* lock and update sdev->ud state */ spin_lock_irq(&sdev->ud.lock); - sdev->ud.tcp_socket = socket; - sdev->ud.sockfd = sockfd; - sdev->ud.tcp_rx = tcp_rx; - 
sdev->ud.tcp_tx = tcp_tx; sdev->ud.status = SDEV_ST_USED; spin_unlock_irq(&sdev->ud.lock); - wake_up_process(sdev->ud.tcp_rx); - wake_up_process(sdev->ud.tcp_tx); - } else { dev_info(dev, "stub down\n"); @@ -140,8 +114,6 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr, return count; -sock_err: - sockfd_put(socket); err: spin_unlock_irq(&sdev->ud.lock); return -EINVAL; diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index 1d681990b398..b9432fdec775 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c @@ -202,16 +202,8 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, /* Extract socket from fd. */ socket = sockfd_lookup(sockfd, &err); - if (!socket) { - dev_err(dev, "failed to lookup sock"); + if (!socket) return -EINVAL; - } - if (socket->type != SOCK_STREAM) { - dev_err(dev, "Expecting SOCK_STREAM - found %d", - socket->type); - sockfd_put(socket); - return -EINVAL; - } /* now need lock until setting vdev status as used */ diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 9891001244ea..666b234acca0 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -1488,7 +1488,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev) if (len == 0xFF) { len = vfio_ext_cap_len(vdev, ecap, epos); if (len < 0) - return len; + return ret; } } diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index 9b170ce16011..e65b142d3422 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -192,7 +192,7 @@ err_irq: vfio_platform_regions_cleanup(vdev); err_reg: mutex_unlock(&driver_lock); - module_put(vdev->parent_module); + module_put(THIS_MODULE); return ret; } diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 307c9d45ec48..e02b179549a0 100644 --- a/drivers/vhost/net.c 
+++ b/drivers/vhost/net.c @@ -377,7 +377,6 @@ static void handle_tx(struct vhost_net *net) size_t hdr_size; struct socket *sock; struct vhost_net_ubuf_ref *uninitialized_var(ubufs); - struct ubuf_info *ubuf; bool zcopy, zcopy_used; int sent_pkts = 0; @@ -445,7 +444,9 @@ static void handle_tx(struct vhost_net *net) /* use msg_control to pass vhost zerocopy ubuf info to skb */ if (zcopy_used) { + struct ubuf_info *ubuf; ubuf = nvq->ubuf_info + nvq->upend_idx; + vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head); vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; ubuf->callback = vhost_zerocopy_callback; @@ -464,8 +465,7 @@ static void handle_tx(struct vhost_net *net) err = sock->ops->sendmsg(sock, &msg, len); if (unlikely(err < 0)) { if (zcopy_used) { - if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS) - vhost_net_ubuf_put(ubufs); + vhost_net_ubuf_put(ubufs); nvq->upend_idx = ((unsigned)nvq->upend_idx - 1) % UIO_MAXIOV; } diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index da47542496cc..d56736655dec 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -329,7 +329,7 @@ __vringh_iov(struct vringh *vrh, u16 i, iov = wiov; else { iov = riov; - if (unlikely(wiov && wiov->used)) { + if (unlikely(wiov && wiov->i)) { vringh_bad("Readable desc %p after writable", &descs[i]); err = -EINVAL; diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c index ab882c04f975..5ef6f9d420a2 100644 --- a/drivers/video/backlight/lm3630a_bl.c +++ b/drivers/video/backlight/lm3630a_bl.c @@ -183,7 +183,7 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl) if ((pwm_ctrl & LM3630A_PWM_BANK_A) != 0) { lm3630a_pwm_ctrl(pchip, bl->props.brightness, bl->props.max_brightness); - return 0; + return bl->props.brightness; } /* disable sleep */ @@ -203,8 +203,8 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl) return 0; out_i2c_err: - dev_err(pchip->dev, "i2c failed to access (%pe)\n", 
ERR_PTR(ret)); - return ret; + dev_err(pchip->dev, "i2c failed to access\n"); + return bl->props.brightness; } static int lm3630a_bank_a_get_brightness(struct backlight_device *bl) @@ -260,7 +260,7 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl) if ((pwm_ctrl & LM3630A_PWM_BANK_B) != 0) { lm3630a_pwm_ctrl(pchip, bl->props.brightness, bl->props.max_brightness); - return 0; + return bl->props.brightness; } /* disable sleep */ @@ -280,8 +280,8 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl) return 0; out_i2c_err: - dev_err(pchip->dev, "i2c failed to access (%pe)\n", ERR_PTR(ret)); - return ret; + dev_err(pchip->dev, "i2c failed to access REG_CTRL\n"); + return bl->props.brightness; } static int lm3630a_bank_b_get_brightness(struct backlight_device *bl) diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index ceae076dc96b..29bb67921639 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -1986,7 +1986,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width, return -EINVAL; DPRINTK("resize now %ix%i\n", var.xres, var.yres); - if (CON_IS_VISIBLE(vc) && vc->vc_mode == KD_TEXT) { + if (CON_IS_VISIBLE(vc)) { var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE; fb_set_var(info, &var); diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c index 9f856b417462..026fd1215933 100644 --- a/drivers/video/console/sticon.c +++ b/drivers/video/console/sticon.c @@ -316,13 +316,13 @@ static unsigned long sticon_getxy(struct vc_data *conp, unsigned long pos, static u8 sticon_build_attr(struct vc_data *conp, u8 color, u8 intens, u8 blink, u8 underline, u8 reverse, u8 italic) { - u8 fg = color & 7; - u8 bg = (color & 0x70) >> 4; + u8 attr = ((color & 0x70) >> 1) | ((color & 7)); - if (reverse) - return (fg << 3) | bg; - else - return (bg << 3) | fg; + if (reverse) { + color = ((color >> 3) & 0x7) | ((color & 0x7) << 3); + } + + return attr; } static void 
sticon_invert_region(struct vc_data *conp, u16 *p, int count) diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index da9ae99fc196..c35ae8c732f6 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -422,17 +422,11 @@ static void vgacon_init(struct vc_data *c, int init) struct uni_pagedir *p; /* - * We cannot be loaded as a module, therefore init will be 1 - * if we are the default console, however if we are a fallback - * console, for example if fbcon has failed registration, then - * init will be 0, so we need to make sure our boot parameters - * have been copied to the console structure for vgacon_resize - * ultimately called by vc_resize. Any subsequent calls to - * vgacon_init init will have init set to 0 too. + * We cannot be loaded as a module, therefore init is always 1, + * but vgacon_init can be called more than once, and init will + * not be 1. */ c->vc_can_do_color = vga_can_do_color; - c->vc_scan_lines = vga_scan_lines; - c->vc_font.height = c->vc_cell_height = vga_video_font_height; /* set dimensions manually if init != 0 since vc_resize() will fail */ if (init) { @@ -441,6 +435,8 @@ static void vgacon_init(struct vc_data *c, int init) } else vc_resize(c, vga_video_num_columns, vga_video_num_lines); + c->vc_scan_lines = vga_scan_lines; + c->vc_font.height = vga_video_font_height; c->vc_complement_mask = 0x7700; if (vga_512_chars) c->vc_hi_font_mask = 0x0800; @@ -578,32 +574,32 @@ static void vgacon_cursor(struct vc_data *c, int mode) switch (c->vc_cursor_type & 0x0f) { case CUR_UNDERLINE: vgacon_set_cursor_size(c->vc_x, - c->vc_cell_height - - (c->vc_cell_height < + c->vc_font.height - + (c->vc_font.height < 10 ? 2 : 3), - c->vc_cell_height - - (c->vc_cell_height < + c->vc_font.height - + (c->vc_font.height < 10 ? 
1 : 2)); break; case CUR_TWO_THIRDS: vgacon_set_cursor_size(c->vc_x, - c->vc_cell_height / 3, - c->vc_cell_height - - (c->vc_cell_height < + c->vc_font.height / 3, + c->vc_font.height - + (c->vc_font.height < 10 ? 1 : 2)); break; case CUR_LOWER_THIRD: vgacon_set_cursor_size(c->vc_x, - (c->vc_cell_height * 2) / 3, - c->vc_cell_height - - (c->vc_cell_height < + (c->vc_font.height * 2) / 3, + c->vc_font.height - + (c->vc_font.height < 10 ? 1 : 2)); break; case CUR_LOWER_HALF: vgacon_set_cursor_size(c->vc_x, - c->vc_cell_height / 2, - c->vc_cell_height - - (c->vc_cell_height < + c->vc_font.height / 2, + c->vc_font.height - + (c->vc_font.height < 10 ? 1 : 2)); break; case CUR_NONE: @@ -614,7 +610,7 @@ static void vgacon_cursor(struct vc_data *c, int mode) break; default: vgacon_set_cursor_size(c->vc_x, 1, - c->vc_cell_height); + c->vc_font.height); break; } break; @@ -625,13 +621,13 @@ static int vgacon_doresize(struct vc_data *c, unsigned int width, unsigned int height) { unsigned long flags; - unsigned int scanlines = height * c->vc_cell_height; + unsigned int scanlines = height * c->vc_font.height; u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan; raw_spin_lock_irqsave(&vga_lock, flags); vgacon_xres = width * VGA_FONTWIDTH; - vgacon_yres = height * c->vc_cell_height; + vgacon_yres = height * c->vc_font.height; if (vga_video_type >= VIDEO_TYPE_VGAC) { outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg); max_scan = inb_p(vga_video_port_val); @@ -686,9 +682,9 @@ static int vgacon_doresize(struct vc_data *c, static int vgacon_switch(struct vc_data *c) { int x = c->vc_cols * VGA_FONTWIDTH; - int y = c->vc_rows * c->vc_cell_height; + int y = c->vc_rows * c->vc_font.height; int rows = screen_info.orig_video_lines * vga_default_font_height/ - c->vc_cell_height; + c->vc_font.height; /* * We need to save screen size here as it's the only way * we can spot the screen has been resized and we need to @@ -1129,7 +1125,7 @@ static int vgacon_adjust_height(struct vc_data *vc, 
unsigned fontheight) cursor_size_lastto = 0; c->vc_sw->con_cursor(c, CM_DRAW); } - c->vc_font.height = c->vc_cell_height = fontheight; + c->vc_font.height = fontheight; vc_resize(c, 0, rows); /* Adjust console size */ } } @@ -1183,20 +1179,12 @@ static int vgacon_resize(struct vc_data *c, unsigned int width, if ((width << 1) * height > vga_vram_size) return -EINVAL; - if (user) { - /* - * Ho ho! Someone (svgatextmode, eh?) may have reprogrammed - * the video mode! Set the new defaults then and go away. - */ - screen_info.orig_video_cols = width; - screen_info.orig_video_lines = height; - vga_default_font_height = c->vc_cell_height; - return 0; - } if (width % 2 || width > screen_info.orig_video_cols || height > (screen_info.orig_video_lines * vga_default_font_height)/ - c->vc_cell_height) - return -EINVAL; + c->vc_font.height) + /* let svgatextmode tinker with video timings and + return success */ + return (user) ? 0 : -EINVAL; if (CON_IS_VISIBLE(c) && !vga_is_gfx) /* who knows */ vgacon_doresize(c, width, height); diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index a6c67e189efa..759aaeb6a196 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1397,7 +1397,6 @@ config FB_ATY select FB_CFB_IMAGEBLIT select FB_BACKLIGHT if FB_ATY_BACKLIGHT select FB_MACMODES if PPC - select FB_ATY_CT if SPARC64 && PCI help This driver supports graphics boards with the ATI Mach64 chips. Say Y if you have such a graphics board. @@ -1408,6 +1407,7 @@ config FB_ATY config FB_ATY_CT bool "Mach64 CT/VT/GT/LT (incl. 
3D RAGE) support" depends on PCI && FB_ATY + default y if SPARC64 && PCI help Say Y here to support use of ATI's 64-bit Rage boards (or other boards based on the Mach64 CT, VT, GT, and LT chipsets) as a diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c index dbcc6ebaf904..7e8ddf00ccc2 100644 --- a/drivers/video/fbdev/asiliantfb.c +++ b/drivers/video/fbdev/asiliantfb.c @@ -227,9 +227,6 @@ static int asiliantfb_check_var(struct fb_var_screeninfo *var, { unsigned long Ftarget, ratio, remainder; - if (!var->pixclock) - return -EINVAL; - ratio = 1000000 / var->pixclock; remainder = 1000000 % var->pixclock; Ftarget = 1000000 * ratio + (1000000 * remainder) / var->pixclock; diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c index 84a3778552eb..314b7eceb81c 100644 --- a/drivers/video/fbdev/chipsfb.c +++ b/drivers/video/fbdev/chipsfb.c @@ -332,7 +332,7 @@ static struct fb_var_screeninfo chipsfb_var = { static void init_chips(struct fb_info *p, unsigned long addr) { - fb_memset(p->screen_base, 0, 0x100000); + memset(p->screen_base, 0, 0x100000); p->fix = chipsfb_fix; p->fix.smem_start = addr; diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c index 1555106ae38f..1f8b480b1a4d 100644 --- a/drivers/video/fbdev/core/fbcmap.c +++ b/drivers/video/fbdev/core/fbcmap.c @@ -101,17 +101,17 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags) if (!len) return 0; - cmap->red = kzalloc(size, flags); + cmap->red = kmalloc(size, flags); if (!cmap->red) goto fail; - cmap->green = kzalloc(size, flags); + cmap->green = kmalloc(size, flags); if (!cmap->green) goto fail; - cmap->blue = kzalloc(size, flags); + cmap->blue = kmalloc(size, flags); if (!cmap->blue) goto fail; if (transp) { - cmap->transp = kzalloc(size, flags); + cmap->transp = kmalloc(size, flags); if (!cmap->transp) goto fail; } else { diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c 
index cf533f1fa699..cce75a2af919 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -32,7 +32,6 @@ #include #include #include -#include #include @@ -982,7 +981,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) if ((var->activate & FB_ACTIVATE_FORCE) || memcmp(&info->var, var, sizeof(struct fb_var_screeninfo))) { u32 activate = var->activate; - u32 unused; /* When using FOURCC mode, make sure the red, green, blue and * transp fields are set to 0. @@ -1003,15 +1001,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) goto done; } - /* bitfill_aligned() assumes that it's at least 8x8 */ - if (var->xres < 8 || var->yres < 8) - return -EINVAL; - - /* Too huge resolution causes multiplication overflow. */ - if (check_mul_overflow(var->xres, var->yres, &unused) || - check_mul_overflow(var->xres_virtual, var->yres_virtual, &unused)) - return -EINVAL; - ret = info->fbops->fb_check_var(var, info); if (ret) diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c index 46ac8bbb376d..4a397c7c1b56 100644 --- a/drivers/video/fbdev/hgafb.c +++ b/drivers/video/fbdev/hgafb.c @@ -286,7 +286,7 @@ static int hga_card_detect(void) hga_vram = ioremap(0xb0000, hga_vram_len); if (!hga_vram) - return -ENOMEM; + goto error; if (request_region(0x3b0, 12, "hgafb")) release_io_ports = 1; @@ -346,18 +346,13 @@ static int hga_card_detect(void) hga_type_name = "Hercules"; break; } - return 0; + return 1; error: if (release_io_ports) release_region(0x3b0, 12); if (release_io_port) release_region(0x3bf, 1); - - iounmap(hga_vram); - - pr_err("hgafb: HGA card not detected.\n"); - - return -EINVAL; + return 0; } /** @@ -555,11 +550,13 @@ static struct fb_ops hgafb_ops = { static int hgafb_probe(struct platform_device *pdev) { struct fb_info *info; - int ret; - ret = hga_card_detect(); - if (ret) - return ret; + if (! 
hga_card_detect()) { + printk(KERN_INFO "hgafb: HGA card not detected.\n"); + if (hga_vram) + iounmap(hga_vram); + return -EINVAL; + } printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n", hga_type_name, hga_vram_len/1024); diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index 883c06381e7c..299412abb165 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -713,9 +713,11 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) } /* - * Map the VRAM cacheable for performance. + * Map the VRAM cacheable for performance. This is also required for + * VM Connect to display properly for ARM64 Linux VM, as the host also + * maps the VRAM cacheable. */ - fb_virt = ioremap_wc(par->mem->start, screen_fb_size); + fb_virt = ioremap_cache(par->mem->start, screen_fb_size); if (!fb_virt) goto err2; diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c index 9b167f7ef6c6..4994a540f680 100644 --- a/drivers/video/fbdev/imsttfb.c +++ b/drivers/video/fbdev/imsttfb.c @@ -1517,6 +1517,11 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) info->fix.smem_start = addr; info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ? 
0x400000 : 0x800000); + if (!info->screen_base) { + release_mem_region(addr, size); + framebuffer_release(info); + return -ENOMEM; + } info->fix.mmio_start = addr + 0x800000; par->dc_regs = ioremap(addr + 0x800000, 0x1000); par->cmap_regs_phys = addr + 0x840000; diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c index d98c3f5d80df..5bb01533271e 100644 --- a/drivers/video/fbdev/kyro/fbdev.c +++ b/drivers/video/fbdev/kyro/fbdev.c @@ -372,11 +372,6 @@ static int kyro_dev_overlay_viewport_set(u32 x, u32 y, u32 ulWidth, u32 ulHeight /* probably haven't called CreateOverlay yet */ return -EINVAL; - if (ulWidth == 0 || ulWidth == 0xffffffff || - ulHeight == 0 || ulHeight == 0xffffffff || - (x < 2 && ulWidth + 2 == 0)) - return -EINVAL; - /* Stop Ramdac Output */ DisableRamdacOutput(deviceInfo.pSTGReg); @@ -399,9 +394,6 @@ static int kyrofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct kyrofb_info *par = info->par; - if (!var->pixclock) - return -EINVAL; - if (var->bits_per_pixel != 16 && var->bits_per_pixel != 32) { printk(KERN_WARNING "kyrofb: depth not supported: %u\n", var->bits_per_pixel); return -EINVAL; diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c index 4deb88126936..290e14a5d7de 100644 --- a/drivers/video/fbdev/msm/mdss_fb.c +++ b/drivers/video/fbdev/msm/mdss_fb.c @@ -638,17 +638,12 @@ static void __mdss_fb_idle_notify_work(struct work_struct *work) /* Notify idle-ness here */ pr_debug("Idle timeout %dms expired!\n", mfd->idle_time); - - mfd->idle_state = MDSS_FB_IDLE; - /* - * idle_notify node events are used to reduce MDP load when idle, - * this is not needed for command mode panels. 
- */ - if (mfd->idle_time && mfd->panel.type != MIPI_CMD_PANEL) + if (mfd->idle_time) sysfs_notify(&mfd->fbi->dev->kobj, NULL, "idle_notify"); - sysfs_notify(&mfd->fbi->dev->kobj, NULL, "idle_state"); + mfd->idle_state = MDSS_FB_IDLE; } + static ssize_t mdss_fb_get_fps_info(struct device *dev, struct device_attribute *attr, char *buf) { @@ -709,26 +704,6 @@ static ssize_t mdss_fb_get_idle_notify(struct device *dev, return ret; } -static ssize_t mdss_fb_get_idle_state(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct fb_info *fbi = dev_get_drvdata(dev); - struct msm_fb_data_type *mfd = fbi->par; - const char *state_strs[] = { - [MDSS_FB_NOT_IDLE] = "active", - [MDSS_FB_IDLE_TIMER_RUNNING] = "pending", - [MDSS_FB_IDLE] = "idle", - }; - int state = mfd->idle_state; - const char *s; - if (state < ARRAY_SIZE(state_strs) && state_strs[state]) - s = state_strs[state]; - else - s = "invalid"; - - return scnprintf(buf, PAGE_SIZE, "%s\n", s); -} - static ssize_t mdss_fb_get_panel_info(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1065,7 +1040,6 @@ static DEVICE_ATTR(show_blank_event, S_IRUGO, mdss_mdp_show_blank_event, NULL); static DEVICE_ATTR(idle_time, S_IRUGO | S_IWUSR | S_IWGRP, mdss_fb_get_idle_time, mdss_fb_set_idle_time); static DEVICE_ATTR(idle_notify, S_IRUGO, mdss_fb_get_idle_notify, NULL); -static DEVICE_ATTR(idle_state, S_IRUGO, mdss_fb_get_idle_state, NULL); static DEVICE_ATTR(msm_fb_panel_info, S_IRUGO, mdss_fb_get_panel_info, NULL); static DEVICE_ATTR(msm_fb_src_split_info, S_IRUGO, mdss_fb_get_src_split_info, NULL); @@ -1087,7 +1061,6 @@ static struct attribute *mdss_fb_attrs[] = { &dev_attr_show_blank_event.attr, &dev_attr_idle_time.attr, &dev_attr_idle_notify.attr, - &dev_attr_idle_state.attr, &dev_attr_msm_fb_panel_info.attr, &dev_attr_msm_fb_src_split_info.attr, &dev_attr_msm_fb_thermal_level.attr, @@ -3456,18 +3429,14 @@ static int __mdss_fb_sync_buf_done_callback(struct notifier_block *p, ret = 
__mdss_fb_wait_for_fence_sub(sync_pt_data, sync_pt_data->temp_fen, fence_cnt); } - if (mfd->idle_time) { - if (!mod_delayed_work(system_wq, + if (mfd->idle_time && !mod_delayed_work(system_wq, &mfd->idle_notify_work, msecs_to_jiffies(mfd->idle_time))) - pr_debug("fb%d: restarted idle work\n", - mfd->index); - mfd->idle_state = MDSS_FB_IDLE_TIMER_RUNNING; - } else { - mfd->idle_state = MDSS_FB_IDLE; - } + pr_debug("fb%d: restarted idle work\n", + mfd->index); if (ret == -ETIME) ret = NOTIFY_BAD; + mfd->idle_state = MDSS_FB_IDLE_TIMER_RUNNING; break; case MDP_NOTIFY_FRAME_FLUSHED: pr_debug("%s: frame flushed\n", sync_pt_data->fence_name); diff --git a/drivers/video/fbdev/msm/mdss_hdmi_cec.c b/drivers/video/fbdev/msm/mdss_hdmi_cec.c index 5fe3f710c29e..12a9267f3749 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_cec.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_cec.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2017,2020-2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2017, 2020 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -469,12 +469,8 @@ static int hdmi_cec_enable(void *input, bool enable) } if (enable) { - /* - * 19.2Mhz * 0.00005 us = 960 = 0x3C0 - * CEC Rd/Wr logic is properly working with - * finetuned value of 0x3D4 = 51 us. 
- */ - DSS_REG_W(io, HDMI_CEC_REFTIMER, (0x3D4 & 0xFFF) | BIT(16)); + /* 19.2Mhz * 0.00005 us = 950 = 0x3B6 */ + DSS_REG_W(io, HDMI_CEC_REFTIMER, (0x3B6 & 0xFFF) | BIT(16)); hdmi_hw_version = DSS_REG_R(io, HDMI_VERSION); if (hdmi_hw_version >= CEC_SUPPORTED_HW_VERSION) { diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c index 5095f4ae49b5..6f0234500d70 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c @@ -65,11 +65,6 @@ #define EDID_VENDOR_ID_SIZE 4 #define EDID_IEEE_REG_ID 0x0c03 -enum edid_screen_orientation { - LANDSCAPE = 1, - PORTRAIT = 2, -}; - enum edid_sink_mode { SINK_MODE_DVI, SINK_MODE_HDMI @@ -148,8 +143,6 @@ struct hdmi_edid_ctrl { u8 cea_blks; /* DC: MSB -> LSB: Y420_48|Y420_36|Y420_30|RGB48|RGB36|RGB30|Y444 */ u8 deep_color; - u8 physical_width; - u8 physical_height; u16 physical_address; u32 video_resolution; /* selected by user */ u32 sink_mode; /* HDMI or DVI */ @@ -174,9 +167,6 @@ struct hdmi_edid_ctrl { bool y420_cmdb_supports_all; struct hdmi_edid_y420_cmdb y420_cmdb; - enum edid_screen_orientation orientation; - enum aspect_ratio aspect_ratio; - struct hdmi_edid_sink_data sink_data; struct hdmi_edid_init_data init_data; struct hdmi_edid_sink_caps sink_caps; @@ -264,11 +254,6 @@ int hdmi_edid_reset_parser(void *input) edid_ctrl->y420_cmdb_supports_all = false; kfree(edid_ctrl->y420_cmdb.vic_list); memset(&edid_ctrl->y420_cmdb, 0, sizeof(edid_ctrl->y420_cmdb)); - - edid_ctrl->physical_width = 0; - edid_ctrl->physical_height = 0; - edid_ctrl->orientation = 0; - edid_ctrl->aspect_ratio = HDMI_RES_AR_INVALID; return 0; } @@ -464,30 +449,6 @@ static ssize_t hdmi_edid_sysfs_rda_modes(struct device *dev, static DEVICE_ATTR(edid_modes, S_IRUGO | S_IWUSR, hdmi_edid_sysfs_rda_modes, hdmi_edid_sysfs_wta_modes); -static ssize_t hdmi_edid_sysfs_rda_screen_size(struct device *dev, - struct device_attribute *attr, char *buf) -{ - ssize_t ret = 0; - struct 
hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev); - - if (!edid_ctrl) { - DEV_ERR("%s: invalid input\n", __func__); - return -EINVAL; - } - - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%d", - (edid_ctrl->physical_width * 10)); - ret += scnprintf(buf + ret, PAGE_SIZE - ret, " %d", - (edid_ctrl->physical_height * 10)); - - DEV_DBG("%s: '%s'\n", __func__, buf); - ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); - - return ret; -} /* hdmi_edid_sysfs_rda_screen_size */ -static DEVICE_ATTR(edid_screen_size, S_IRUGO, hdmi_edid_sysfs_rda_screen_size, - NULL); - static ssize_t hdmi_edid_sysfs_rda_res_info_data(struct device *dev, struct device_attribute *attr, char *buf) { @@ -894,7 +855,6 @@ static DEVICE_ATTR(hdr_data, S_IRUGO, hdmi_edid_sysfs_rda_hdr_data, NULL); static struct attribute *hdmi_edid_fs_attrs[] = { &dev_attr_edid_modes.attr, - &dev_attr_edid_screen_size.attr, &dev_attr_pa.attr, &dev_attr_scan_info.attr, &dev_attr_edid_3d_modes.attr, @@ -1555,68 +1515,6 @@ static u32 hdmi_edid_extract_ieee_reg_id(struct hdmi_edid_ctrl *edid_ctrl, return ((u32)vsd[3] << 16) + ((u32)vsd[2] << 8) + (u32)vsd[1]; } /* hdmi_edid_extract_ieee_reg_id */ -static void hdmi_edid_extract_bdpf(struct hdmi_edid_ctrl *edid_ctrl) -{ - u8 *edid_buf = NULL; - - if (!edid_ctrl) { - DEV_ERR("%s: invalid input\n", __func__); - return; - } - - edid_buf = edid_ctrl->edid_buf; - if (edid_buf[21] && edid_buf[22]) { - edid_ctrl->physical_width = edid_buf[21]; - edid_ctrl->physical_height = edid_buf[22]; - - DEV_DBG("%s: EDID: Horizontal Screen Size = %d cm\n", - __func__, edid_ctrl->physical_width); - DEV_DBG("%s: EDID: Vertical Screen Size = %d cm\n", - __func__, edid_ctrl->physical_height); - } else if (edid_buf[21]) { - edid_ctrl->orientation = LANDSCAPE; - switch (edid_buf[21]) { - case 0x4F: - edid_ctrl->aspect_ratio = HDMI_RES_AR_16_9; - break; - case 0x3D: - edid_ctrl->aspect_ratio = HDMI_RES_AR_16_10; - break; - case 0x22: - edid_ctrl->aspect_ratio = HDMI_RES_AR_4_3; - break; - case 
0x1A: - edid_ctrl->aspect_ratio = HDMI_RES_AR_5_4; - break; - } - DEV_DBG("%s: EDID: Landscape Aspect Ratio = %d\n", - __func__, edid_ctrl->aspect_ratio); - } else if (edid_buf[22]) { - edid_ctrl->orientation = PORTRAIT; - switch (edid_buf[22]) { - case 0x4F: - edid_ctrl->aspect_ratio = HDMI_RES_AR_16_9; - break; - case 0x3D: - edid_ctrl->aspect_ratio = HDMI_RES_AR_16_10; - break; - case 0x22: - edid_ctrl->aspect_ratio = HDMI_RES_AR_4_3; - break; - case 0x1A: - edid_ctrl->aspect_ratio = HDMI_RES_AR_5_4; - break; - } - DEV_DBG("%s: EDID: Portrait Aspect Ratio = %d\n", - __func__, edid_ctrl->aspect_ratio); - } else { - pr_debug("%s: Undefined Screen size/Aspect ratio\n", __func__); - edid_ctrl->orientation = 0; - edid_ctrl->physical_width = 0; - edid_ctrl->physical_height = 0; - } -} - static void hdmi_edid_extract_vendor_id(struct hdmi_edid_ctrl *edid_ctrl) { char *vendor_id; @@ -2564,8 +2462,6 @@ int hdmi_edid_parser(void *input) hdmi_edid_extract_vendor_id(edid_ctrl); - hdmi_edid_extract_bdpf(edid_ctrl); - /* EDID_CEA_EXTENSION_FLAG[0x7E] - CEC extension byte */ num_of_cea_blocks = edid_buf[EDID_BLOCK_SIZE - 2]; DEV_DBG("%s: No. 
of CEA/Extended EDID blocks is [%u]\n", __func__, @@ -3018,30 +2914,6 @@ bool hdmi_edid_is_audio_supported(void *input) return (edid_ctrl->basic_audio_supp || edid_ctrl->adb_size); } -u32 hdmi_edid_get_phys_width(void *input) -{ - struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input; - - if (!edid_ctrl) { - DEV_ERR("%s: invalid edid_ctrl data\n", __func__); - return 0; - } - - return (u32)edid_ctrl->physical_width * 10; /* return in mm */ -} - -u32 hdmi_edid_get_phys_height(void *input) -{ - struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input; - - if (!edid_ctrl) { - DEV_ERR("%s: invalid edid_ctrl data\n", __func__); - return 0; - } - - return (u32)edid_ctrl->physical_height * 10; /* return in mm */ -} - void hdmi_edid_deinit(void *input) { struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.h b/drivers/video/fbdev/msm/mdss_hdmi_edid.h index 9b3b5fa952b4..d258aa9f95bc 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_edid.h +++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.h @@ -94,7 +94,5 @@ void hdmi_edid_set_max_pclk_rate(void *input, u32 max_pclk_khz); bool hdmi_edid_is_audio_supported(void *input); u32 hdmi_edid_get_sink_caps_max_tmds_clk(void *input); u8 hdmi_edid_get_colorimetry(void *input); -u32 hdmi_edid_get_phys_width(void *input); -u32 hdmi_edid_get_phys_height(void *input); #endif /* __HDMI_EDID_H__ */ diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c index 5651f58ab080..0b7605713b29 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c @@ -28,7 +28,6 @@ #include #include #include -#include #define REG_DUMP 0 @@ -3046,96 +3045,6 @@ static void hdmi_tx_phy_reset(struct hdmi_tx_ctrl *hdmi_ctrl) DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET_PLL); } /* hdmi_tx_phy_reset */ -static u8 calc_infoframe_checksum(u8 *ptr, size_t size) -{ - u8 csum = 0; - size_t i; - - /* compute checksum */ - 
for (i = 0; i < size; i++) - csum += ptr[i]; - - return 256 - csum; -} - -static u8 hdmi_panel_set_hdr_checksum(struct mdp_hdr_stream *hdr_meta) -{ - u8 *buff; - u8 *ptr; - u32 length; - u32 size; - u32 checksum = 0; - u32 const type_code = 0x87; - u32 const version = 0x01; - u32 const descriptor_id = 0x00; - - /* length of metadata is 26 bytes */ - length = 0x1a; - /* add 4 bytes for the header */ - size = length + HDMI_INFOFRAME_HEADER_SIZE; - - buff = kzalloc(size, GFP_KERNEL); - - if (!buff) { - DEV_ERR("invalid buff\n"); - goto err_alloc; - } - - ptr = buff; - - buff[0] = type_code; - buff[1] = version; - buff[2] = length; - buff[3] = 0; - /* start infoframe payload */ - buff += HDMI_INFOFRAME_HEADER_SIZE; - - buff[0] = hdr_meta->eotf; - buff[1] = descriptor_id; - - buff[2] = hdr_meta->display_primaries_x[0] & 0xff; - buff[3] = hdr_meta->display_primaries_x[0] >> 8; - - buff[4] = hdr_meta->display_primaries_x[1] & 0xff; - buff[5] = hdr_meta->display_primaries_x[1] >> 8; - - buff[6] = hdr_meta->display_primaries_x[2] & 0xff; - buff[7] = hdr_meta->display_primaries_x[2] >> 8; - - buff[8] = hdr_meta->display_primaries_y[0] & 0xff; - buff[9] = hdr_meta->display_primaries_y[0] >> 8; - - buff[10] = hdr_meta->display_primaries_y[1] & 0xff; - buff[11] = hdr_meta->display_primaries_y[1] >> 8; - - buff[12] = hdr_meta->display_primaries_y[2] & 0xff; - buff[13] = hdr_meta->display_primaries_y[2] >> 8; - - buff[14] = hdr_meta->white_point_x & 0xff; - buff[15] = hdr_meta->white_point_x >> 8; - buff[16] = hdr_meta->white_point_y & 0xff; - buff[17] = hdr_meta->white_point_y >> 8; - - buff[18] = hdr_meta->max_luminance & 0xff; - buff[19] = hdr_meta->max_luminance >> 8; - - buff[20] = hdr_meta->min_luminance & 0xff; - buff[21] = hdr_meta->min_luminance >> 8; - - buff[22] = hdr_meta->max_content_light_level & 0xff; - buff[23] = hdr_meta->max_content_light_level >> 8; - - buff[24] = hdr_meta->max_average_light_level & 0xff; - buff[25] = hdr_meta->max_average_light_level >> 8; - - 
checksum = calc_infoframe_checksum(ptr, size); - - kfree(ptr); - -err_alloc: - return checksum; -} - static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *ctrl) { u32 packet_payload = 0; @@ -3145,10 +3054,8 @@ static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *ctrl) u32 const version = 0x01; u32 const length = 0x1a; u32 const descriptor_id = 0x00; - u8 checksum = 0; struct dss_io_data *io = NULL; - if (!ctrl) { pr_err("%s: invalid input\n", __func__); return; @@ -3169,18 +3076,7 @@ static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *ctrl) packet_header = type_code | (version << 8) | (length << 16); DSS_REG_W(io, HDMI_GENERIC0_HDR, packet_header); - /** - * Checksum is not a mandatory field for - * the HDR infoframe as per CEA-861-3 specification. - * However some HDMI sinks still expect a - * valid checksum to be included as part of - * the infoframe. Hence compute and add - * the checksum to improve sink interoperability - * for our HDR solution on HDMI. - */ - checksum = hdmi_panel_set_hdr_checksum(&ctrl->hdr_ctrl.hdr_stream); - - packet_payload = ((ctrl->hdr_ctrl.hdr_stream.eotf << 8) | checksum); + packet_payload = (ctrl->hdr_ctrl.hdr_stream.eotf << 8); if (hdmi_tx_metadata_type_one(ctrl)) { packet_payload |= (descriptor_id << 16) @@ -3543,9 +3439,6 @@ static int hdmi_tx_power_off(struct hdmi_tx_ctrl *hdmi_ctrl) hdmi_ctrl->panel_power_on = false; hdmi_ctrl->vic = 0; - hdmi_ctrl->use_bt2020 = false; - hdmi_ctrl->curr_hdr_state = HDR_DISABLE; - if (hdmi_ctrl->hpd_off_pending || hdmi_ctrl->panel_suspend) hdmi_tx_hpd_off(hdmi_ctrl); @@ -4239,7 +4132,6 @@ sysfs_err: static int hdmi_tx_evt_handle_check_param(struct hdmi_tx_ctrl *hdmi_ctrl) { struct mdss_panel_info *pinfo = &hdmi_ctrl->panel_data.panel_info; - void *data = NULL; int new_vic = -1; int rc = 0; @@ -4251,10 +4143,6 @@ static int hdmi_tx_evt_handle_check_param(struct hdmi_tx_ctrl *hdmi_ctrl) goto end; } - data = hdmi_tx_get_fd(HDMI_TX_FEAT_EDID); - pinfo->physical_width = 
hdmi_edid_get_phys_width(data); - pinfo->physical_height = hdmi_edid_get_phys_height(data); - /* * return value of 1 lets mdss know that panel * needs a reconfig due to new resolution and diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c index 6e5e29fe13db..f1ad2747064b 100644 --- a/drivers/video/fbdev/riva/fbdev.c +++ b/drivers/video/fbdev/riva/fbdev.c @@ -1088,9 +1088,6 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) int mode_valid = 0; NVTRACE_ENTER(); - if (!var->pixclock) - return -EINVAL; - switch (var->bits_per_pixel) { case 1 ... 8: var->red.offset = var->green.offset = var->blue.offset = 0; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 80e67b8e55be..7a3d722be30d 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -1019,7 +1019,7 @@ bool virtqueue_is_broken(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); - return READ_ONCE(vq->broken); + return vq->broken; } EXPORT_SYMBOL_GPL(virtqueue_is_broken); @@ -1033,9 +1033,7 @@ void virtio_break_device(struct virtio_device *dev) list_for_each_entry(_vq, &dev->vqs, list) { struct vring_virtqueue *vq = to_vvq(_vq); - - /* Pairs with READ_ONCE() in virtqueue_is_broken(). 
*/ - WRITE_ONCE(vq->broken, true); + vq->broken = true; } } EXPORT_SYMBOL_GPL(virtio_break_device); diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c index 83c586489142..365d6dff21de 100644 --- a/drivers/w1/slaves/w1_ds28e04.c +++ b/drivers/w1/slaves/w1_ds28e04.c @@ -39,7 +39,7 @@ static int w1_strong_pullup = 1; module_param_named(strong_pullup, w1_strong_pullup, int, 0); /* enable/disable CRC checking on DS28E04-100 memory accesses */ -static bool w1_enable_crccheck = true; +static char w1_enable_crccheck = 1; #define W1_EEPROM_SIZE 512 #define W1_PAGE_COUNT 16 @@ -346,18 +346,32 @@ static BIN_ATTR_RW(pio, 1); static ssize_t crccheck_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sysfs_emit(buf, "%d\n", w1_enable_crccheck); + if (put_user(w1_enable_crccheck + 0x30, buf)) + return -EFAULT; + + return sizeof(w1_enable_crccheck); } static ssize_t crccheck_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int err = kstrtobool(buf, &w1_enable_crccheck); + char val; + + if (count != 1 || !buf) + return -EINVAL; - if (err) - return err; + if (get_user(val, buf)) + return -EFAULT; - return count; + /* convert to decimal */ + val = val - 0x30; + if (val != 0 && val != 1) + return -EINVAL; + + /* set the new value */ + w1_enable_crccheck = val; + + return sizeof(w1_enable_crccheck); } static DEVICE_ATTR_RW(crccheck); diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c index 96bf71802eff..2b12ef019ae0 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c @@ -225,17 +225,15 @@ static int watchdog_set_timeout(int timeout) mutex_lock(&watchdog.lock); + watchdog.timeout = timeout; if (timeout > 0xff) { watchdog.timer_val = DIV_ROUND_UP(timeout, 60); watchdog.minutes_mode = true; - timeout = watchdog.timer_val * 60; } else { watchdog.timer_val = timeout; watchdog.minutes_mode = false; } - watchdog.timeout = timeout; - 
mutex_unlock(&watchdog.lock); return 0; diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c index fbdc0f32e666..ab7b8b185d99 100644 --- a/drivers/watchdog/lpc18xx_wdt.c +++ b/drivers/watchdog/lpc18xx_wdt.c @@ -309,7 +309,7 @@ static int lpc18xx_wdt_remove(struct platform_device *pdev) unregister_restart_handler(&lpc18xx_wdt->restart_handler); dev_warn(&pdev->dev, "I quit now, hardware will probably reboot!\n"); - del_timer_sync(&lpc18xx_wdt->timer); + del_timer(&lpc18xx_wdt->timer); watchdog_unregister_device(&lpc18xx_wdt->wdt_dev); clk_disable_unprepare(lpc18xx_wdt->wdt_clk); diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c index 152db059d5aa..2eef58a0cf05 100644 --- a/drivers/watchdog/sbc60xxwdt.c +++ b/drivers/watchdog/sbc60xxwdt.c @@ -152,7 +152,7 @@ static void wdt_startup(void) static void wdt_turnoff(void) { /* Stop the timer */ - del_timer_sync(&timer); + del_timer(&timer); inb_p(wdt_stop); pr_info("Watchdog timer is now disabled...\n"); } diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c index 08500db8324f..1cfd3f6a13d5 100644 --- a/drivers/watchdog/sc520_wdt.c +++ b/drivers/watchdog/sc520_wdt.c @@ -190,7 +190,7 @@ static int wdt_startup(void) static int wdt_turnoff(void) { /* Stop the timer */ - del_timer_sync(&timer); + del_timer(&timer); /* Stop the watchdog */ wdt_config(0); diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c index 4b52cf321747..f0483c75ed32 100644 --- a/drivers/watchdog/w83877f_wdt.c +++ b/drivers/watchdog/w83877f_wdt.c @@ -170,7 +170,7 @@ static void wdt_startup(void) static void wdt_turnoff(void) { /* Stop the timer */ - del_timer_sync(&timer); + del_timer(&timer); wdt_change(WDT_DISABLE); diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c index ca729f19061d..e902512fcfb5 100644 --- a/drivers/xen/events/events_2l.c +++ b/drivers/xen/events/events_2l.c @@ -46,11 +46,6 @@ static unsigned 
evtchn_2l_max_channels(void) return EVTCHN_2L_NR_CHANNELS; } -static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu) -{ - clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu))); -} - static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu) { clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu))); @@ -75,6 +70,12 @@ static bool evtchn_2l_is_pending(unsigned port) return sync_test_bit(port, BM(&s->evtchn_pending[0])); } +static bool evtchn_2l_test_and_set_mask(unsigned port) +{ + struct shared_info *s = HYPERVISOR_shared_info; + return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0])); +} + static void evtchn_2l_mask(unsigned port) { struct shared_info *s = HYPERVISOR_shared_info; @@ -352,27 +353,18 @@ static void evtchn_2l_resume(void) EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); } -static int evtchn_2l_percpu_deinit(unsigned int cpu) -{ - memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) * - EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); - - return 0; -} - static const struct evtchn_ops evtchn_ops_2l = { .max_channels = evtchn_2l_max_channels, .nr_channels = evtchn_2l_max_channels, - .remove = evtchn_2l_remove, .bind_to_cpu = evtchn_2l_bind_to_cpu, .clear_pending = evtchn_2l_clear_pending, .set_pending = evtchn_2l_set_pending, .is_pending = evtchn_2l_is_pending, + .test_and_set_mask = evtchn_2l_test_and_set_mask, .mask = evtchn_2l_mask, .unmask = evtchn_2l_unmask, .handle_events = evtchn_2l_handle_events, .resume = evtchn_2l_resume, - .percpu_deinit = evtchn_2l_percpu_deinit, }; void __init xen_evtchn_2l_init(void) diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 0c5b187dc7a0..ec4074c66d9d 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -99,7 +99,6 @@ static DEFINE_RWLOCK(evtchn_rwlock); * evtchn_rwlock * IRQ-desc lock * percpu eoi_list_lock - * irq_info->lock */ static LIST_HEAD(xen_irq_list_head); @@ -134,12 +133,12 @@ static void 
disable_dynirq(struct irq_data *data); static DEFINE_PER_CPU(unsigned int, irq_epoch); -static void clear_evtchn_to_irq_row(int *evtchn_row) +static void clear_evtchn_to_irq_row(unsigned row) { unsigned col; for (col = 0; col < EVTCHN_PER_ROW; col++) - WRITE_ONCE(evtchn_row[col], -1); + WRITE_ONCE(evtchn_to_irq[row][col], -1); } static void clear_evtchn_to_irq_all(void) @@ -149,7 +148,7 @@ static void clear_evtchn_to_irq_all(void) for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) { if (evtchn_to_irq[row] == NULL) continue; - clear_evtchn_to_irq_row(evtchn_to_irq[row]); + clear_evtchn_to_irq_row(row); } } @@ -157,7 +156,6 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq) { unsigned row; unsigned col; - int *evtchn_row; if (evtchn >= xen_evtchn_max_channels()) return -EINVAL; @@ -170,18 +168,11 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq) if (irq == -1) return 0; - evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0); - if (evtchn_row == NULL) + evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL); + if (evtchn_to_irq[row] == NULL) return -ENOMEM; - clear_evtchn_to_irq_row(evtchn_row); - - /* - * We've prepared an empty row for the mapping. If a different - * thread was faster inserting it, we can drop ours. 
- */ - if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL) - free_page((unsigned long) evtchn_row); + clear_evtchn_to_irq_row(row); } WRITE_ONCE(evtchn_to_irq[row][col], irq); @@ -229,8 +220,6 @@ static int xen_irq_info_common_setup(struct irq_info *info, info->irq = irq; info->evtchn = evtchn; info->cpu = cpu; - info->mask_reason = EVT_MASK_REASON_EXPLICIT; - raw_spin_lock_init(&info->lock); ret = set_evtchn_to_irq(evtchn, irq); if (ret < 0) @@ -297,7 +286,6 @@ static int xen_irq_info_pirq_setup(unsigned irq, static void xen_irq_info_cleanup(struct irq_info *info) { set_evtchn_to_irq(info->evtchn, -1); - xen_evtchn_port_remove(info->evtchn, info->cpu); info->evtchn = 0; } @@ -378,34 +366,6 @@ unsigned int cpu_from_evtchn(unsigned int evtchn) return ret; } -static void do_mask(struct irq_info *info, u8 reason) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&info->lock, flags); - - if (!info->mask_reason) - mask_evtchn(info->evtchn); - - info->mask_reason |= reason; - - raw_spin_unlock_irqrestore(&info->lock, flags); -} - -static void do_unmask(struct irq_info *info, u8 reason) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&info->lock, flags); - - info->mask_reason &= ~reason; - - if (!info->mask_reason) - unmask_evtchn(info->evtchn); - - raw_spin_unlock_irqrestore(&info->lock, flags); -} - #ifdef CONFIG_X86 static bool pirq_check_eoi_map(unsigned irq) { @@ -541,10 +501,7 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious) } info->eoi_time = 0; - - /* is_active hasn't been reset yet, do it now. 
*/ - smp_store_release(&info->is_active, 0); - do_unmask(info, EVT_MASK_REASON_EOI_PENDING); + unmask_evtchn(evtchn); } static void xen_irq_lateeoi_worker(struct work_struct *work) @@ -713,12 +670,6 @@ static void xen_evtchn_close(unsigned int port) BUG(); } -static void event_handler_exit(struct irq_info *info) -{ - smp_store_release(&info->is_active, 0); - clear_evtchn(info->evtchn); -} - static void pirq_query_unmask(int irq) { struct physdev_irq_status_query irq_status; @@ -737,8 +688,7 @@ static void pirq_query_unmask(int irq) static void eoi_pirq(struct irq_data *data) { - struct irq_info *info = info_for_irq(data->irq); - int evtchn = info ? info->evtchn : 0; + int evtchn = evtchn_from_irq(data->irq); struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; int rc = 0; @@ -747,15 +697,16 @@ static void eoi_pirq(struct irq_data *data) if (unlikely(irqd_is_setaffinity_pending(data)) && likely(!irqd_irq_disabled(data))) { - do_mask(info, EVT_MASK_REASON_TEMPORARY); + int masked = test_and_set_mask(evtchn); - event_handler_exit(info); + clear_evtchn(evtchn); irq_move_masked_irq(data); - do_unmask(info, EVT_MASK_REASON_TEMPORARY); + if (!masked) + unmask_evtchn(evtchn); } else - event_handler_exit(info); + clear_evtchn(evtchn); if (pirq_needs_eoi(data->irq)) { rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); @@ -806,8 +757,7 @@ static unsigned int __startup_pirq(unsigned int irq) goto err; out: - do_unmask(info, EVT_MASK_REASON_EXPLICIT); - + unmask_evtchn(evtchn); eoi_pirq(irq_get_irq_data(irq)); return 0; @@ -834,7 +784,7 @@ static void shutdown_pirq(struct irq_data *data) if (!VALID_EVTCHN(evtchn)) return; - do_mask(info, EVT_MASK_REASON_EXPLICIT); + mask_evtchn(evtchn); xen_evtchn_close(evtchn); xen_irq_info_cleanup(info); } @@ -1590,8 +1540,6 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl) } info = info_for_irq(irq); - if (xchg_acquire(&info->is_active, 1)) - return; if (ctrl->defer_eoi) { info->eoi_cpu = 
smp_processor_id(); @@ -1698,8 +1646,8 @@ void rebind_evtchn_irq(int evtchn, int irq) static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) { struct evtchn_bind_vcpu bind_vcpu; - struct irq_info *info = info_for_irq(irq); - int evtchn = info ? info->evtchn : 0; + int evtchn = evtchn_from_irq(irq); + int masked; if (!VALID_EVTCHN(evtchn)) return -1; @@ -1715,7 +1663,7 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) * Mask the event while changing the VCPU binding to prevent * it being delivered on an unexpected VCPU. */ - do_mask(info, EVT_MASK_REASON_TEMPORARY); + masked = test_and_set_mask(evtchn); /* * If this fails, it usually just indicates that we're dealing with a @@ -1725,7 +1673,8 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) bind_evtchn_to_cpu(evtchn, tcpu); - do_unmask(info, EVT_MASK_REASON_TEMPORARY); + if (!masked) + unmask_evtchn(evtchn); return 0; } @@ -1740,41 +1689,39 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, static void enable_dynirq(struct irq_data *data) { - struct irq_info *info = info_for_irq(data->irq); - evtchn_port_t evtchn = info ? info->evtchn : 0; + int evtchn = evtchn_from_irq(data->irq); if (VALID_EVTCHN(evtchn)) - do_unmask(info, EVT_MASK_REASON_EXPLICIT); + unmask_evtchn(evtchn); } static void disable_dynirq(struct irq_data *data) { - struct irq_info *info = info_for_irq(data->irq); - evtchn_port_t evtchn = info ? info->evtchn : 0; + int evtchn = evtchn_from_irq(data->irq); if (VALID_EVTCHN(evtchn)) - do_mask(info, EVT_MASK_REASON_EXPLICIT); + mask_evtchn(evtchn); } static void ack_dynirq(struct irq_data *data) { - struct irq_info *info = info_for_irq(data->irq); - evtchn_port_t evtchn = info ? 
info->evtchn : 0; + int evtchn = evtchn_from_irq(data->irq); if (!VALID_EVTCHN(evtchn)) return; if (unlikely(irqd_is_setaffinity_pending(data)) && likely(!irqd_irq_disabled(data))) { - do_mask(info, EVT_MASK_REASON_TEMPORARY); + int masked = test_and_set_mask(evtchn); - event_handler_exit(info); + clear_evtchn(evtchn); irq_move_masked_irq(data); - do_unmask(info, EVT_MASK_REASON_TEMPORARY); + if (!masked) + unmask_evtchn(evtchn); } else - event_handler_exit(info); + clear_evtchn(evtchn); } static void mask_ack_dynirq(struct irq_data *data) @@ -1783,51 +1730,18 @@ static void mask_ack_dynirq(struct irq_data *data) ack_dynirq(data); } -static void lateeoi_ack_dynirq(struct irq_data *data) -{ - struct irq_info *info = info_for_irq(data->irq); - evtchn_port_t evtchn = info ? info->evtchn : 0; - - if (!VALID_EVTCHN(evtchn)) - return; - - do_mask(info, EVT_MASK_REASON_EOI_PENDING); - - if (unlikely(irqd_is_setaffinity_pending(data)) && - likely(!irqd_irq_disabled(data))) { - do_mask(info, EVT_MASK_REASON_TEMPORARY); - - clear_evtchn(evtchn); - - irq_move_masked_irq(data); - - do_unmask(info, EVT_MASK_REASON_TEMPORARY); - } else - clear_evtchn(evtchn); -} - -static void lateeoi_mask_ack_dynirq(struct irq_data *data) -{ - struct irq_info *info = info_for_irq(data->irq); - evtchn_port_t evtchn = info ? info->evtchn : 0; - - if (VALID_EVTCHN(evtchn)) { - do_mask(info, EVT_MASK_REASON_EXPLICIT); - ack_dynirq(data); - } -} - static int retrigger_dynirq(struct irq_data *data) { - struct irq_info *info = info_for_irq(data->irq); - evtchn_port_t evtchn = info ? 
info->evtchn : 0; + unsigned int evtchn = evtchn_from_irq(data->irq); + int masked; if (!VALID_EVTCHN(evtchn)) return 0; - do_mask(info, EVT_MASK_REASON_TEMPORARY); + masked = test_and_set_mask(evtchn); set_evtchn(evtchn); - do_unmask(info, EVT_MASK_REASON_TEMPORARY); + if (!masked) + unmask_evtchn(evtchn); return 1; } @@ -1922,11 +1836,10 @@ static void restore_cpu_ipis(unsigned int cpu) /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq) { - struct irq_info *info = info_for_irq(irq); - evtchn_port_t evtchn = info ? info->evtchn : 0; + int evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) - event_handler_exit(info); + clear_evtchn(evtchn); } EXPORT_SYMBOL(xen_clear_irq_pending); void xen_set_irq_pending(int irq) @@ -2035,8 +1948,8 @@ static struct irq_chip xen_lateeoi_chip __read_mostly = { .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, - .irq_ack = lateeoi_ack_dynirq, - .irq_mask_ack = lateeoi_mask_ack_dynirq, + .irq_ack = mask_ack_dynirq, + .irq_mask_ack = mask_ack_dynirq, .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index 5e6ff2120132..7addca0d8d26 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c @@ -209,6 +209,12 @@ static bool evtchn_fifo_is_pending(unsigned port) return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); } +static bool evtchn_fifo_test_and_set_mask(unsigned port) +{ + event_word_t *word = event_word_from_port(port); + return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); +} + static void evtchn_fifo_mask(unsigned port) { event_word_t *word = event_word_from_port(port); @@ -415,6 +421,7 @@ static const struct evtchn_ops evtchn_ops_fifo = { .clear_pending = evtchn_fifo_clear_pending, .set_pending = evtchn_fifo_set_pending, .is_pending = evtchn_fifo_is_pending, + .test_and_set_mask = 
evtchn_fifo_test_and_set_mask, .mask = evtchn_fifo_mask, .unmask = evtchn_fifo_unmask, .handle_events = evtchn_fifo_handle_events, diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h index cc37b711491c..b9b4f5919893 100644 --- a/drivers/xen/events/events_internal.h +++ b/drivers/xen/events/events_internal.h @@ -35,19 +35,13 @@ struct irq_info { struct list_head eoi_list; short refcnt; short spurious_cnt; - short type; /* type */ - u8 mask_reason; /* Why is event channel masked */ -#define EVT_MASK_REASON_EXPLICIT 0x01 -#define EVT_MASK_REASON_TEMPORARY 0x02 -#define EVT_MASK_REASON_EOI_PENDING 0x04 - u8 is_active; /* Is event just being handled? */ + enum xen_irq_type type; /* type */ unsigned irq; unsigned int evtchn; /* event channel */ unsigned short cpu; /* cpu bound */ unsigned short eoi_cpu; /* EOI must happen on this cpu */ unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */ u64 eoi_time; /* Time in jiffies when to EOI. */ - raw_spinlock_t lock; union { unsigned short virq; @@ -73,12 +67,12 @@ struct evtchn_ops { unsigned (*nr_channels)(void); int (*setup)(struct irq_info *info); - void (*remove)(evtchn_port_t port, unsigned int cpu); void (*bind_to_cpu)(struct irq_info *info, unsigned cpu); void (*clear_pending)(unsigned port); void (*set_pending)(unsigned port); bool (*is_pending)(unsigned port); + bool (*test_and_set_mask)(unsigned port); void (*mask)(unsigned port); void (*unmask)(unsigned port); @@ -115,13 +109,6 @@ static inline int xen_evtchn_port_setup(struct irq_info *info) return 0; } -static inline void xen_evtchn_port_remove(evtchn_port_t evtchn, - unsigned int cpu) -{ - if (evtchn_ops->remove) - evtchn_ops->remove(evtchn, cpu); -} - static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info, unsigned cpu) { @@ -143,6 +130,11 @@ static inline bool test_evtchn(unsigned port) return evtchn_ops->is_pending(port); } +static inline bool test_and_set_mask(unsigned port) +{ + return 
evtchn_ops->test_and_set_mask(port); +} + static inline void mask_evtchn(unsigned port) { return evtchn_ops->mask(port); diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 6f077ae0cf31..1865bcfa869b 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -293,47 +293,36 @@ static int map_grant_pages(struct grant_map *map) * to the kernel linear addresses of the struct pages. * These ptes are completely different from the user ptes dealt * with find_grant_ptes. - * Note that GNTMAP_device_map isn't needed here: The - * dev_bus_addr output field gets consumed only from ->map_ops, - * and by not requesting it when mapping we also avoid needing - * to mirror dev_bus_addr into ->unmap_ops (and holding an extra - * reference to the page in the hypervisor). */ - unsigned int flags = (map->flags & ~GNTMAP_device_map) | - GNTMAP_host_map; - for (i = 0; i < map->count; i++) { unsigned long address = (unsigned long) pfn_to_kaddr(page_to_pfn(map->pages[i])); BUG_ON(PageHighMem(map->pages[i])); - gnttab_set_map_op(&map->kmap_ops[i], address, flags, + gnttab_set_map_op(&map->kmap_ops[i], address, + map->flags | GNTMAP_host_map, map->grants[i].ref, map->grants[i].domid); gnttab_set_unmap_op(&map->kunmap_ops[i], address, - flags, -1); + map->flags | GNTMAP_host_map, -1); } } pr_debug("map %d+%d\n", map->index, map->count); err = gnttab_map_refs(map->map_ops, use_ptemod ? 
map->kmap_ops : NULL, map->pages, map->count); + if (err) + return err; for (i = 0; i < map->count; i++) { - if (map->map_ops[i].status == GNTST_okay) - map->unmap_ops[i].handle = map->map_ops[i].handle; - else if (!err) + if (map->map_ops[i].status) { err = -EINVAL; - - if (map->flags & GNTMAP_device_map) - map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr; - - if (use_ptemod) { - if (map->kmap_ops[i].status == GNTST_okay) - map->kunmap_ops[i].handle = map->kmap_ops[i].handle; - else if (!err) - err = -EINVAL; + continue; } + + map->unmap_ops[i].handle = map->map_ops[i].handle; + if (use_ptemod) + map->kunmap_ops[i].handle = map->kmap_ops[i].handle; } return err; } diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c index daa2e89a50fa..b1a1d7de0894 100644 --- a/drivers/xen/xen-pciback/conf_space_capability.c +++ b/drivers/xen/xen-pciback/conf_space_capability.c @@ -159,7 +159,7 @@ static void *pm_ctrl_init(struct pci_dev *dev, int offset) } out: - return err ? ERR_PTR(err) : NULL; + return ERR_PTR(err); } static const struct config_field caplist_pm[] = { diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c index e6c7509a3d87..c99f8bb1c56c 100644 --- a/drivers/xen/xen-pciback/vpci.c +++ b/drivers/xen/xen-pciback/vpci.c @@ -68,7 +68,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev *dev, int devid, publish_pci_dev_cb publish_cb) { - int err = 0, slot, func = PCI_FUNC(dev->devfn); + int err = 0, slot, func = -1; struct pci_dev_entry *t, *dev_entry; struct vpci_dev_data *vpci_dev = pdev->pci_dev_data; @@ -93,26 +93,23 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, /* * Keep multi-function devices together on the virtual PCI bus, except - * that we want to keep virtual functions at func 0 on their own. 
They - * aren't multi-function devices and hence their presence at func 0 - * may cause guests to not scan the other functions. + * virtual functions. */ - if (!dev->is_virtfn || func) { + if (!dev->is_virtfn) { for (slot = 0; slot < PCI_SLOT_MAX; slot++) { if (list_empty(&vpci_dev->dev_list[slot])) continue; t = list_entry(list_first(&vpci_dev->dev_list[slot]), struct pci_dev_entry, list); - if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn)) - continue; if (match_slot(dev, t->dev)) { pr_info("vpci: %s: assign to virtual slot %d func %d\n", pci_name(dev), slot, - func); + PCI_FUNC(dev->devfn)); list_add_tail(&dev_entry->list, &vpci_dev->dev_list[slot]); + func = PCI_FUNC(dev->devfn); goto unlock; } } @@ -125,6 +122,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, pci_name(dev), slot); list_add_tail(&dev_entry->list, &vpci_dev->dev_list[slot]); + func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn); goto unlock; } } diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c index c784a01aa8cb..48196347f2f9 100644 --- a/drivers/xen/xen-pciback/xenbus.c +++ b/drivers/xen/xen-pciback/xenbus.c @@ -359,8 +359,7 @@ out: return err; } -static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev, - enum xenbus_state state) +static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev) { int err = 0; int num_devs; @@ -374,7 +373,9 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev, dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n"); mutex_lock(&pdev->dev_lock); - if (xenbus_read_driver_state(pdev->xdev->nodename) != state) + /* Make sure we only reconfigure once */ + if (xenbus_read_driver_state(pdev->xdev->nodename) != + XenbusStateReconfiguring) goto out; err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d", @@ -501,10 +502,6 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev, } } - if (state != XenbusStateReconfiguring) - /* Make sure we only reconfigure once. 
*/ - goto out; - err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured); if (err) { xenbus_dev_fatal(pdev->xdev, err, @@ -530,7 +527,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev, break; case XenbusStateReconfiguring: - xen_pcibk_reconfigure(pdev, XenbusStateReconfiguring); + xen_pcibk_reconfigure(pdev); break; case XenbusStateConnected: @@ -669,15 +666,6 @@ static void xen_pcibk_be_watch(struct xenbus_watch *watch, xen_pcibk_setup_backend(pdev); break; - case XenbusStateInitialised: - /* - * We typically move to Initialised when the first device was - * added. Hence subsequent devices getting added may need - * reconfiguring. - */ - xen_pcibk_reconfigure(pdev, XenbusStateInitialised); - break; - default: break; } @@ -703,7 +691,7 @@ static int xen_pcibk_xenbus_probe(struct xenbus_device *dev, /* watch the backend node for backend configuration information */ err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch, - NULL, xen_pcibk_be_watch); + xen_pcibk_be_watch); if (err) goto out; diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 7fb5a2e7fa81..29a1b8054a4d 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c @@ -415,12 +415,12 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map, return 0; err = gnttab_map_refs(map, NULL, pg, cnt); + BUG_ON(err); for (i = 0; i < cnt; i++) { if (unlikely(map[i].status != GNTST_okay)) { pr_err("invalid buffer -- could not remap it\n"); map[i].handle = SCSIBACK_INVALID_HANDLE; - if (!err) - err = -ENOMEM; + err = -ENOMEM; } else { get_page(pg[i]); } diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 8bbd887ca422..266f446ba331 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -114,22 +114,18 @@ EXPORT_SYMBOL_GPL(xenbus_strstate); */ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, - bool 
(*will_handle)(struct xenbus_watch *, - const char **, unsigned int), void (*callback)(struct xenbus_watch *, const char **, unsigned int)) { int err; watch->node = path; - watch->will_handle = will_handle; watch->callback = callback; err = register_xenbus_watch(watch); if (err) { watch->node = NULL; - watch->will_handle = NULL; watch->callback = NULL; xenbus_dev_fatal(dev, err, "adding watch on %s", path); } @@ -156,8 +152,6 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path); */ int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, - bool (*will_handle)(struct xenbus_watch *, - const char **, unsigned int), void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...) @@ -174,7 +168,7 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev, xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } - err = xenbus_watch_path(dev, path, watch, will_handle, callback); + err = xenbus_watch_path(dev, path, watch, callback); if (err) kfree(path); diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index d7f886dd7b55..c2d447687e33 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -137,7 +137,6 @@ static int watch_otherend(struct xenbus_device *dev) container_of(dev->dev.bus, struct xen_bus_type, bus); return xenbus_watch_pathfmt(dev, &dev->otherend_watch, - bus->otherend_will_handle, bus->otherend_changed, "%s/%s", dev->otherend, "state"); } @@ -764,7 +763,7 @@ static struct notifier_block xenbus_resume_nb = { static int __init xenbus_init(void) { - int err; + int err = 0; uint64_t v = 0; xen_store_domain_type = XS_UNKNOWN; @@ -804,29 +803,6 @@ static int __init xenbus_init(void) err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if (err) goto out_error; - /* - * Uninitialized hvm_params are zero and return no error. 
- * Although it is theoretically possible to have - * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is - * not zero when valid. If zero, it means that Xenstore hasn't - * been properly initialized. Instead of attempting to map a - * wrong guest physical address return error. - * - * Also recognize all bits set as an invalid value. - */ - if (!v || !~v) { - err = -ENOENT; - goto out_error; - } - /* Avoid truncation on 32-bit. */ -#if BITS_PER_LONG == 32 - if (v > ULONG_MAX) { - pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n", - __func__, v); - err = -EINVAL; - goto out_error; - } -#endif xen_store_gfn = (unsigned long)v; xen_store_interface = xen_remap(xen_store_gfn << XEN_PAGE_SHIFT, @@ -855,10 +831,8 @@ static int __init xenbus_init(void) */ proc_mkdir("xen", NULL); #endif - return 0; out_error: - xen_store_domain_type = XS_UNKNOWN; return err; } diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h index 2c394c6ba605..c9ec7ca1f7ab 100644 --- a/drivers/xen/xenbus/xenbus_probe.h +++ b/drivers/xen/xenbus/xenbus_probe.h @@ -42,8 +42,6 @@ struct xen_bus_type { int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename); int (*probe)(struct xen_bus_type *bus, const char *type, const char *dir); - bool (*otherend_will_handle)(struct xenbus_watch *watch, - const char **vec, unsigned int len); void (*otherend_changed)(struct xenbus_watch *watch, const char **vec, unsigned int len); struct bus_type bus; diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c index 597c0b038454..04f7f85a5edf 100644 --- a/drivers/xen/xenbus/xenbus_probe_backend.c +++ b/drivers/xen/xenbus/xenbus_probe_backend.c @@ -181,12 +181,6 @@ static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type, return err; } -static bool frontend_will_handle(struct xenbus_watch *watch, - const char **vec, unsigned int len) -{ - return watch->nr_pending == 0; -} - static void 
frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { @@ -198,7 +192,6 @@ static struct xen_bus_type xenbus_backend = { .levels = 3, /* backend/type// */ .get_bus_id = backend_bus_id, .probe = xenbus_probe_backend, - .otherend_will_handle = frontend_will_handle, .otherend_changed = frontend_changed, .bus = { .name = "xen-backend", diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index d98d88fae58a..ce65591b4168 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -701,8 +701,6 @@ int register_xenbus_watch(struct xenbus_watch *watch) sprintf(token, "%lX", (long)watch); - watch->nr_pending = 0; - down_read(&xs_state.watch_mutex); spin_lock(&watches_lock); @@ -752,15 +750,12 @@ void unregister_xenbus_watch(struct xenbus_watch *watch) /* Cancel pending watch events. */ spin_lock(&watch_events_lock); - if (watch->nr_pending) { - list_for_each_entry_safe(msg, tmp, &watch_events, list) { - if (msg->u.watch.handle != watch) - continue; - list_del(&msg->list); - kfree(msg->u.watch.vec); - kfree(msg); - } - watch->nr_pending = 0; + list_for_each_entry_safe(msg, tmp, &watch_events, list) { + if (msg->u.watch.handle != watch) + continue; + list_del(&msg->list); + kfree(msg->u.watch.vec); + kfree(msg); } spin_unlock(&watch_events_lock); @@ -807,6 +802,7 @@ void xs_suspend_cancel(void) static int xenwatch_thread(void *unused) { + struct list_head *ent; struct xs_stored_msg *msg; for (;;) { @@ -819,15 +815,13 @@ static int xenwatch_thread(void *unused) mutex_lock(&xenwatch_mutex); spin_lock(&watch_events_lock); - msg = list_first_entry_or_null(&watch_events, - struct xs_stored_msg, list); - if (msg) { - list_del(&msg->list); - msg->u.watch.handle->nr_pending--; - } + ent = watch_events.next; + if (ent != &watch_events) + list_del(ent); spin_unlock(&watch_events_lock); - if (msg) { + if (ent != &watch_events) { + msg = list_entry(ent, struct xs_stored_msg, list); msg->u.watch.handle->callback( 
msg->u.watch.handle, (const char **)msg->u.watch.vec, @@ -909,15 +903,9 @@ static int process_msg(void) spin_lock(&watches_lock); msg->u.watch.handle = find_watch( msg->u.watch.vec[XS_WATCH_TOKEN]); - if (msg->u.watch.handle != NULL && - (!msg->u.watch.handle->will_handle || - msg->u.watch.handle->will_handle( - msg->u.watch.handle, - (const char **)msg->u.watch.vec, - msg->u.watch.vec_size))) { + if (msg->u.watch.handle != NULL) { spin_lock(&watch_events_lock); list_add_tail(&msg->list, &watch_events); - msg->u.watch.handle->nr_pending++; wake_up(&watch_events_waitq); spin_unlock(&watch_events_lock); } else { diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt index 72c03354c14b..2d0cbbd14cfc 100644 --- a/fs/Kconfig.binfmt +++ b/fs/Kconfig.binfmt @@ -1,7 +1,6 @@ config BINFMT_ELF bool "Kernel support for ELF binaries" depends on MMU && (BROKEN || !FRV) - select ELFCORE default y ---help--- ELF (Executable and Linkable Format) is a format for libraries and @@ -27,7 +26,6 @@ config BINFMT_ELF config COMPAT_BINFMT_ELF bool depends on COMPAT && BINFMT_ELF - select ELFCORE config ARCH_BINFMT_ELF_STATE bool @@ -36,7 +34,6 @@ config BINFMT_ELF_FDPIC bool "Kernel support for FDPIC ELF binaries" default y depends on (FRV || BLACKFIN || (SUPERH32 && !MMU) || C6X) - select ELFCORE help ELF FDPIC binaries are based on ELF, but allow the individual load segments of a binary to be located in memory independently of each @@ -46,11 +43,6 @@ config BINFMT_ELF_FDPIC It is also possible to run FDPIC ELF binaries on MMU linux also. -config ELFCORE - bool - help - This option enables kernel/elfcore.o. 
- config CORE_DUMP_DEFAULT_ELF_HEADERS bool "Write ELF core dumps with partial segments" default y diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index fd6b67c40d9d..80e9c18ea64f 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -9,8 +9,6 @@ config BTRFS_FS select RAID6_PQ select XOR_BLOCKS select SRCU - depends on !PPC_256K_PAGES # powerpc - depends on !PAGE_SIZE_256KB # hexagon help Btrfs is a general purpose copy-on-write filesystem with extents, diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index d096254d9acc..205d6b43cd7d 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -261,13 +261,6 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) ordered_list); if (!test_bit(WORK_DONE_BIT, &work->flags)) break; - /* - * Orders all subsequent loads after reading WORK_DONE_BIT, - * paired with the smp_mb__before_atomic in btrfs_work_helper - * this guarantees that the ordered function will see all - * updates from ordinary work function. - */ - smp_rmb(); /* * we are going to call the ordered done function, but @@ -317,13 +310,6 @@ static void normal_work_helper(struct btrfs_work *work) thresh_exec_hook(wq); work->func(work); if (need_order) { - /* - * Ensures all memory accesses done in the work function are - * ordered before setting the WORK_DONE_BIT. Ensuring the thread - * which is going to executed the ordered work sees them. - * Pairs with the smp_rmb in run_ordered_work. - */ - smp_mb__before_atomic(); set_bit(WORK_DONE_BIT, &work->flags); run_ordered_work(wq); } diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 00c9a9e719ec..228bfa19b745 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -975,12 +975,7 @@ again: ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0); if (ret < 0) goto out; - if (ret == 0) { - /* This shouldn't happen, indicates a bug or fs corruption. 
*/ - ASSERT(ret != 0); - ret = -EUCLEAN; - goto out; - } + BUG_ON(ret == 0); #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS if (trans && likely(trans->type != __TRANS_DUMMY) && @@ -1109,18 +1104,10 @@ again: goto out; if (!ret && extent_item_pos) { /* - * We've recorded that parent, so we must extend - * its inode list here. - * - * However if there was corruption we may not - * have found an eie, return an error in this - * case. + * we've recorded that parent, so we must extend + * its inode list here */ - ASSERT(eie); - if (!eie) { - ret = -EUCLEAN; - goto out; - } + BUG_ON(!eie); while (eie->next) eie = eie->next; eie->next = ref->inode_list; diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 92601775ec5e..bae05c5c75ba 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -290,7 +290,7 @@ static void end_compressed_bio_write(struct bio *bio) cb->start, cb->start + cb->len - 1, NULL, - !cb->errors); + bio->bi_error ? 0 : 1); cb->compressed_pages[0]->mapping = NULL; end_compressed_writeback(inode, cb); diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index fbb4c81f6311..3fa0515d7685 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1129,8 +1129,6 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, ret = update_ref_for_cow(trans, root, buf, cow, &last_ref); if (ret) { - btrfs_tree_unlock(cow); - free_extent_buffer(cow); btrfs_abort_transaction(trans, root, ret); return ret; } @@ -1138,8 +1136,6 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) { ret = btrfs_reloc_cow_block(trans, root, buf, cow); if (ret) { - btrfs_tree_unlock(cow); - free_extent_buffer(cow); btrfs_abort_transaction(trans, root, ret); return ret; } @@ -1178,8 +1174,6 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, if (last_ref) { ret = tree_mod_log_free_eb(root->fs_info, buf); if (ret) { - btrfs_tree_unlock(cow); - free_extent_buffer(cow); 
btrfs_abort_transaction(trans, root, ret); return ret; } @@ -1431,9 +1425,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq) btrfs_warn(root->fs_info, "failed to read tree block %llu from get_old_root", logical); } else { - btrfs_tree_read_lock(old); eb = btrfs_clone_extent_buffer(old); - btrfs_tree_read_unlock(old); free_extent_buffer(old); } } else if (old_root) { diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 2426dc56426f..73b547f88bfc 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1089,7 +1089,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, int del_nr = 0; int del_slot = 0; int recow; - int ret = 0; + int ret; u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); @@ -1284,7 +1284,7 @@ again: } out: btrfs_free_path(path); - return ret; + return 0; } /* diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 55d8020afc58..05b1b0f99f0b 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -754,10 +754,8 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, while (num_entries) { e = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); - if (!e) { - ret = -ENOMEM; + if (!e) goto free_cache; - } ret = io_ctl_read_entry(&io_ctl, e, &type); if (ret) { @@ -766,7 +764,6 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, } if (!e->bytes) { - ret = -1; kmem_cache_free(btrfs_free_space_cachep, e); goto free_cache; } @@ -786,7 +783,6 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, num_bitmaps--; e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); if (!e->bitmap) { - ret = -ENOMEM; kmem_cache_free( btrfs_free_space_cachep, e); goto free_cache; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 92415b8ac5a3..5467e168cffd 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6930,7 +6930,7 @@ again: found_type == BTRFS_FILE_EXTENT_PREALLOC) { /* Only regular file could have regular/prealloc 
extent */ if (!S_ISREG(inode->i_mode)) { - err = -EUCLEAN; + ret = -EUCLEAN; btrfs_crit(root->fs_info, "regular/prealloc extent found for non-regular inode %llu", btrfs_ino(inode)); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index bc4cc417e7ab..18e667fbd054 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -2288,10 +2288,8 @@ out: } btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem); - if (done && !ret) { + if (done && !ret) ret = 1; - fs_info->qgroup_rescan_progress.objectid = (u64)-1; - } return ret; } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 628b6a046093..df04309390bb 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1328,7 +1328,9 @@ static void __del_reloc_root(struct btrfs_root *root) RB_CLEAR_NODE(&node->rb_node); } spin_unlock(&rc->reloc_root_tree.lock); - ASSERT(!node || (struct btrfs_root *)node->data == root); + if (!node) + return; + BUG_ON((struct btrfs_root *)node->data != root); } spin_lock(&root->fs_info->trans_lock); @@ -1785,8 +1787,8 @@ int replace_path(struct btrfs_trans_handle *trans, int ret; int slot; - ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); - ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); + BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); + BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); last_snapshot = btrfs_root_last_snapshot(&src->root_item); again: @@ -1818,7 +1820,7 @@ again: parent = eb; while (1) { level = btrfs_header_level(parent); - ASSERT(level >= lowest_level); + BUG_ON(level < lowest_level); ret = btrfs_bin_search(parent, &key, level, &slot); if (ret && slot > 0) diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 0b41a88ef9e9..cc9ccc42f469 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -918,6 +918,11 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) have_csum = sblock_to_check->pagev[0]->have_csum; dev = sblock_to_check->pagev[0]->dev; + if (sctx->is_dev_replace && 
!is_metadata && !have_csum) { + sblocks_for_recheck = NULL; + goto nodatasum_case; + } + /* * read all mirrors one after the other. This includes to * re-read the extent or metadata block that failed (that was @@ -1030,19 +1035,13 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) goto out; } - /* - * NOTE: Even for nodatasum case, it's still possible that it's a - * compressed data extent, thus scrub_fixup_nodatasum(), which write - * inode page cache onto disk, could cause serious data corruption. - * - * So here we could only read from disk, and hope our recovery could - * reach disk before the newer write. - */ - if (0 && !is_metadata && !have_csum) { + if (!is_metadata && !have_csum) { struct scrub_fixup_nodatasum *fixup_nodatasum; WARN_ON(sctx->is_dev_replace); +nodatasum_case: + /* * !is_metadata and !have_csum, this means that the data * might not be COW'ed, that it might be modified diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index 2825cbe3ea8d..69255148f0c8 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -48,13 +48,7 @@ static struct file_system_type test_type = { struct inode *btrfs_new_test_inode(void) { - struct inode *inode; - - inode = new_inode(test_mnt->mnt_sb); - if (inode) - inode_init_owner(inode, NULL, S_IFREG); - - return inode; + return new_inode(test_mnt->mnt_sb); } int btrfs_init_test_fs(void) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f0675b7c95ec..64e449eb2ecd 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1264,10 +1264,8 @@ int btrfs_defrag_root(struct btrfs_root *root) while (1) { trans = btrfs_start_transaction(root, 0); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - break; - } + if (IS_ERR(trans)) + return PTR_ERR(trans); ret = btrfs_defrag_leaves(trans, root); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index bcf61a32f970..20c911028647 100644 --- a/fs/btrfs/tree-log.c +++ 
b/fs/btrfs/tree-log.c @@ -1511,7 +1511,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, break; if (ret == 1) { - ret = 0; if (path->slots[0] == 0) break; path->slots[0]--; @@ -1524,19 +1523,17 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, ret = btrfs_del_item(trans, root, path); if (ret) - break; + goto out; btrfs_release_path(path); inode = read_one_inode(root, key.offset); - if (!inode) { - ret = -EIO; - break; - } + if (!inode) + return -EIO; ret = fixup_inode_link_count(trans, root, inode); iput(inode); if (ret) - break; + goto out; /* * fixup on a directory may create new entries, @@ -1545,6 +1542,8 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, */ key.offset = (u64)-1; } + ret = 0; +out: btrfs_release_path(path); return ret; } @@ -1583,6 +1582,8 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, ret = btrfs_update_inode(trans, root, inode); } else if (ret == -EEXIST) { ret = 0; + } else { + BUG(); /* Logic Error */ } iput(inode); @@ -2207,9 +2208,7 @@ again: else { ret = find_dir_range(log, path, dirid, key_type, &range_start, &range_end); - if (ret < 0) - goto out; - else if (ret > 0) + if (ret != 0) break; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d6383d362e27..cd1e9411f926 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2357,6 +2357,9 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) btrfs_set_super_num_devices(root->fs_info->super_copy, tmp + 1); + /* add sysfs device entry */ + btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device); + /* * we've got more storage, clear any full flags on the space * infos @@ -2364,10 +2367,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) btrfs_clear_space_info_full(root->fs_info); unlock_chunks(root); - - /* add sysfs device entry */ - btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device); - 
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); if (seeding_dev) { diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index bec37093a3de..e16cda37ab67 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -72,6 +72,10 @@ static int ceph_set_page_dirty(struct page *page) struct inode *inode; struct ceph_inode_info *ci; struct ceph_snap_context *snapc; + int ret; + + if (unlikely(!mapping)) + return !TestSetPageDirty(page); if (PageDirty(page)) { dout("%p set_page_dirty %p idx %lu -- already dirty\n", @@ -117,7 +121,11 @@ static int ceph_set_page_dirty(struct page *page) page->private = (unsigned long)snapc; SetPagePrivate(page); - return __set_page_dirty_nobuffers(page); + ret = __set_page_dirty_nobuffers(page); + WARN_ON(!PageLocked(page)); + WARN_ON(!page->mapping); + + return ret; } /* diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 154c47282a34..49e693232916 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -920,19 +920,12 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) { struct ceph_mds_session *session = cap->session; struct ceph_inode_info *ci = cap->ci; - struct ceph_mds_client *mdsc; + struct ceph_mds_client *mdsc = + ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; int removed = 0; - /* 'ci' being NULL means the remove have already occurred */ - if (!ci) { - dout("%s: cap inode is NULL\n", __func__); - return; - } - dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); - mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc; - /* remove from inode's cap rbtree, and clear auth cap */ rb_erase(&cap->ci_node, &ci->i_caps); if (ci->i_auth_cap == cap) @@ -1545,8 +1538,6 @@ static int __mark_caps_flushing(struct inode *inode, * try to invalidate mapping pages without blocking. 
*/ static int try_nonblocking_invalidate(struct inode *inode) - __releases(ci->i_ceph_lock) - __acquires(ci->i_ceph_lock) { struct ceph_inode_info *ci = ceph_inode(inode); u32 invalidating_gen = ci->i_rdcache_gen; diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index e5e780145728..942874257a09 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -367,9 +367,14 @@ cifs_strndup_from_utf16(const char *src, const int maxlen, if (!dst) return NULL; cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage, - NO_MAP_UNI_RSVD); + NO_MAP_UNI_RSVD); } else { - dst = kstrndup(src, maxlen, GFP_KERNEL); + len = strnlen(src, maxlen); + len++; + dst = kmalloc(len, GFP_KERNEL); + if (!dst) + return NULL; + strlcpy(dst, src, len); } return dst; diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 3baf210f5995..4f4fc9ff3636 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -204,7 +204,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf) rc = server->ops->queryfs(xid, tcon, buf); free_xid(xid); - return rc; + return 0; } static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len) @@ -745,7 +745,6 @@ cifs_do_mount(struct file_system_type *fs_type, out_super: deactivate_locked_super(sb); - return root; out: cifs_cleanup_volume_info(volume_info); return root; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index cda22b312a4c..f4ef8d6ea8ed 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -783,8 +783,6 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) list_del_init(&server->tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); - cancel_delayed_work_sync(&server->echo); - spin_lock(&GlobalMid_Lock); server->tcpStatus = CifsExiting; spin_unlock(&GlobalMid_Lock); @@ -2961,10 +2959,9 @@ cifs_match_super(struct super_block *sb, void *data) spin_lock(&cifs_tcp_ses_lock); cifs_sb = CIFS_SB(sb); tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); - if (tlink == NULL) { - /* can not match superblock if 
tlink were ever null */ + if (IS_ERR(tlink)) { spin_unlock(&cifs_tcp_ses_lock); - return 0; + return rc; } tcon = tlink_tcon(tlink); ses = tcon->ses; diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 9f1641324a81..be16da31cbcc 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -831,7 +831,6 @@ static int cifs_d_revalidate(struct dentry *direntry, unsigned int flags) { struct inode *inode; - int rc; if (flags & LOOKUP_RCU) return -ECHILD; @@ -841,25 +840,8 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags) if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode))) CIFS_I(inode)->time = 0; /* force reval */ - rc = cifs_revalidate_dentry(direntry); - if (rc) { - cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc); - switch (rc) { - case -ENOENT: - case -ESTALE: - /* - * Those errors mean the dentry is invalid - * (file was deleted or recreated) - */ - return 0; - default: - /* - * Otherwise some unexpected error happened - * report it as-is to VFS layer - */ - return rc; - } - } + if (cifs_revalidate_dentry(direntry)) + return 0; else { /* * If the inode wasn't known to be a dfs entry when diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 62cc0c22db63..a5008d692c2a 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -163,7 +163,6 @@ int cifs_posix_open(char *full_path, struct inode **pinode, goto posix_open_ret; } } else { - cifs_revalidate_mapping(*pinode); cifs_fattr_to_inode(*pinode, &fattr); } diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 2d3918cdcc28..9bc7a29f88d6 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -602,7 +602,7 @@ sess_alloc_buffer(struct sess_data *sess_data, int wct) return 0; out_free_smb_buf: - cifs_small_buf_release(smb_buf); + kfree(smb_buf); sess_data->iov[0].iov_base = NULL; sess_data->iov[0].iov_len = 0; sess_data->buf0_type = CIFS_NO_BUFFER; diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 19baeb4ca511..44198b9a5315 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -633,8 +633,8 @@ 
smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) } } spin_unlock(&cifs_tcp_ses_lock); - cifs_dbg(FYI, "No file id matched, oplock break ignored\n"); - return true; + cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n"); + return false; } void diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index c173d047b44b..087261ca6d46 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -619,8 +619,6 @@ smb2_clone_range(const unsigned int xid, cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk)); /* Request server copy to target from src identified by key */ - kfree(retbuf); - retbuf = NULL; rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, true /* is_fsctl */, (char *)pcchunk, diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index a41c030fd962..8e709b641b55 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -50,14 +50,6 @@ DECLARE_RWSEM(configfs_rename_sem); */ DEFINE_SPINLOCK(configfs_dirent_lock); -/* - * All of link_obj/unlink_obj/link_group/unlink_group require that - * subsys->su_mutex is held. - * But parent configfs_subsystem is NULL when config_item is root. - * Use this mutex when config_item is root. 
- */ -static DEFINE_MUTEX(configfs_subsystem_mutex); - static void configfs_d_iput(struct dentry * dentry, struct inode * inode) { @@ -1768,9 +1760,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) group->cg_item.ci_name = group->cg_item.ci_namebuf; sd = root->d_fsdata; - mutex_lock(&configfs_subsystem_mutex); link_group(to_config_group(sd->s_element), group); - mutex_unlock(&configfs_subsystem_mutex); mutex_lock_nested(&d_inode(root)->i_mutex, I_MUTEX_PARENT); @@ -1795,9 +1785,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) mutex_unlock(&d_inode(root)->i_mutex); if (err) { - mutex_lock(&configfs_subsystem_mutex); unlink_group(group); - mutex_unlock(&configfs_subsystem_mutex); configfs_release_fs(); } @@ -1836,9 +1824,7 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys) dput(dentry); - mutex_lock(&configfs_subsystem_mutex); unlink_group(group); - mutex_unlock(&configfs_subsystem_mutex); configfs_release_fs(); } diff --git a/fs/direct-io.c b/fs/direct-io.c index 5afb6e260c84..da574a74a467 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -794,7 +794,6 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, struct buffer_head *map_bh) { int ret = 0; - int boundary = sdio->boundary; /* dio_send_cur_page may clear it */ if (dio->rw & WRITE) { /* @@ -833,10 +832,10 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits; out: /* - * If boundary then we want to schedule the IO now to + * If sdio->boundary then we want to schedule the IO now to * avoid metadata seeks. 
*/ - if (boundary) { + if (sdio->boundary) { ret = dio_send_cur_page(dio, sdio, map_bh); if (sdio->bio) dio_bio_submit(dio, sdio); diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index 3b79c0284a30..eea64912c9c0 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -545,7 +545,6 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) if (bucket >= ls->ls_rsbtbl_size) { kfree(ri); - ++*pos; return NULL; } tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index ffab7dc88157..3a7f401e943c 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -3975,14 +3975,6 @@ static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms) int from = ms->m_header.h_nodeid; int error = 0; - /* currently mixing of user/kernel locks are not supported */ - if (ms->m_flags & DLM_IFL_USER && ~lkb->lkb_flags & DLM_IFL_USER) { - log_error(lkb->lkb_resource->res_ls, - "got user dlm message for a kernel lock"); - error = -EINVAL; - goto out; - } - switch (ms->m_type) { case DLM_MSG_CONVERT: case DLM_MSG_UNLOCK: @@ -4011,7 +4003,6 @@ static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms) error = -EINVAL; } -out: if (error) log_error(lkb->lkb_resource->res_ls, "ignore invalid message %d from %d %x %x %x %d", diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 99f4cd91910f..9d7a4a714907 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -554,7 +554,7 @@ static void close_connection(struct connection *con, bool and_other, } if (con->othercon && and_other) { /* Will only re-enter once. 
*/ - close_connection(con->othercon, false, tx, rx); + close_connection(con->othercon, false, true, true); } if (con->rx_page) { __free_page(con->rx_page); diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 151e6c252e78..76fc5295e120 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -359,8 +359,10 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat, struct extent_crypt_result ecr; int rc = 0; - BUG_ON(!crypt_stat || !crypt_stat->tfm - || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)); + if (!crypt_stat || !crypt_stat->tfm + || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)) + return -EINVAL; + if (unlikely(ecryptfs_verbosity > 0)) { ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n", ecryptfs_get_key_size_to_enc_data(crypt_stat)); diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index a8a05fb1ee01..b7426496ac80 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -624,12 +624,6 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags goto out; } - if (!dev_name) { - rc = -EINVAL; - err = "Device name cannot be null"; - goto out; - } - rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid); if (rc) { err = "Error parsing options"; diff --git a/fs/exec.c b/fs/exec.c index a9d2fdf8330b..9febd05be1f3 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -875,7 +875,7 @@ static int exec_mmap(struct mm_struct *mm) /* Notify parent that we're no longer interested in the old VM */ tsk = current; old_mm = current->mm; - exec_mm_release(tsk, old_mm); + mm_release(tsk, old_mm); if (old_mm) { sync_mm_rss(old_mm); diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c index 2e4747e0aaf0..9f9992b37924 100644 --- a/fs/ext2/balloc.c +++ b/fs/ext2/balloc.c @@ -46,9 +46,10 @@ struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb, struct ext2_sb_info *sbi = EXT2_SB(sb); if (block_group >= sbi->s_groups_count) { - WARN(1, "block_group >= groups_count - " - "block_group = %d, groups_count = 
%lu", - block_group, sbi->s_groups_count); + ext2_error (sb, "ext2_get_group_desc", + "block_group >= groups_count - " + "block_group = %d, groups_count = %lu", + block_group, sbi->s_groups_count); return NULL; } @@ -56,9 +57,10 @@ struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb, group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(sb); offset = block_group & (EXT2_DESC_PER_BLOCK(sb) - 1); if (!sbi->s_group_desc[group_desc]) { - WARN(1, "Group descriptor not loaded - " - "block_group = %d, group_desc = %lu, desc = %lu", - block_group, group_desc, offset); + ext2_error (sb, "ext2_get_group_desc", + "Group descriptor not loaded - " + "block_group = %d, group_desc = %lu, desc = %lu", + block_group, group_desc, offset); return NULL; } diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index 176a8382e372..d5055b3adccc 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -23,7 +23,6 @@ struct ext4_system_zone { struct rb_node node; ext4_fsblk_t start_blk; unsigned int count; - u32 ino; }; static struct kmem_cache *ext4_system_zone_cachep; @@ -44,8 +43,7 @@ void ext4_exit_system_zone(void) static inline int can_merge(struct ext4_system_zone *entry1, struct ext4_system_zone *entry2) { - if ((entry1->start_blk + entry1->count) == entry2->start_blk && - entry1->ino == entry2->ino) + if ((entry1->start_blk + entry1->count) == entry2->start_blk) return 1; return 0; } @@ -57,9 +55,9 @@ static inline int can_merge(struct ext4_system_zone *entry1, */ static int add_system_zone(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk, - unsigned int count, u32 ino) + unsigned int count) { - struct ext4_system_zone *new_entry, *entry; + struct ext4_system_zone *new_entry = NULL, *entry; struct rb_node **n = &sbi->system_blks.rb_node, *node; struct rb_node *parent = NULL, *new_node = NULL; @@ -70,21 +68,30 @@ static int add_system_zone(struct ext4_sb_info *sbi, n = &(*n)->rb_left; else if (start_blk >= (entry->start_blk + entry->count)) 
n = &(*n)->rb_right; - else /* Unexpected overlap of system zones. */ - return -EFSCORRUPTED; + else { + if (start_blk + count > (entry->start_blk + + entry->count)) + entry->count = (start_blk + count - + entry->start_blk); + new_node = *n; + new_entry = rb_entry(new_node, struct ext4_system_zone, + node); + break; + } } - new_entry = kmem_cache_alloc(ext4_system_zone_cachep, - GFP_KERNEL); - if (!new_entry) - return -ENOMEM; - new_entry->start_blk = start_blk; - new_entry->count = count; - new_entry->ino = ino; - new_node = &new_entry->node; + if (!new_entry) { + new_entry = kmem_cache_alloc(ext4_system_zone_cachep, + GFP_KERNEL); + if (!new_entry) + return -ENOMEM; + new_entry->start_blk = start_blk; + new_entry->count = count; + new_node = &new_entry->node; - rb_link_node(new_node, parent, n); - rb_insert_color(new_node, &sbi->system_blks); + rb_link_node(new_node, parent, n); + rb_insert_color(new_node, &sbi->system_blks); + } /* Can we merge to the left? */ node = rb_prev(new_node); @@ -156,16 +163,16 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino) if (n == 0) { i++; } else { - err = add_system_zone(sbi, map.m_pblk, n, ino); - if (err < 0) { - if (err == -EFSCORRUPTED) { - ext4_error(sb, - "blocks %llu-%llu from inode %u " + if (!ext4_data_block_valid(sbi, map.m_pblk, n)) { + ext4_error(sb, "blocks %llu-%llu from inode %u " "overlap system zone", map.m_pblk, map.m_pblk + map.m_len - 1, ino); - } + err = -EFSCORRUPTED; break; } + err = add_system_zone(sbi, map.m_pblk, n); + if (err < 0) + break; i += n; } } @@ -194,16 +201,16 @@ int ext4_setup_system_zone(struct super_block *sb) if (ext4_bg_has_super(sb, i) && ((i < 5) || ((i % flex_size) == 0))) add_system_zone(sbi, ext4_group_first_block_no(sb, i), - ext4_bg_num_gdb(sb, i) + 1, 0); + ext4_bg_num_gdb(sb, i) + 1); gdp = ext4_get_group_desc(sb, i, NULL); - ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1, 0); + ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1); if 
(ret) return ret; - ret = add_system_zone(sbi, ext4_inode_bitmap(sb, gdp), 1, 0); + ret = add_system_zone(sbi, ext4_inode_bitmap(sb, gdp), 1); if (ret) return ret; ret = add_system_zone(sbi, ext4_inode_table(sb, gdp), - sbi->s_itb_per_group, 0); + sbi->s_itb_per_group); if (ret) return ret; } @@ -236,11 +243,10 @@ void ext4_release_system_zone(struct super_block *sb) * start_blk+count) is valid; 0 if some part of the block region * overlaps with filesystem metadata blocks. */ -int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk, - unsigned int count) +int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk, + unsigned int count) { struct ext4_system_zone *entry; - struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct rb_node *n = sbi->system_blks.rb_node; if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) || @@ -256,8 +262,6 @@ int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk, else if (start_blk >= (entry->start_blk + entry->count)) n = n->rb_right; else { - if (entry->ino == inode->i_ino) - return 1; sbi->s_es->s_last_error_block = cpu_to_le64(start_blk); return 0; } @@ -280,7 +284,8 @@ int ext4_check_blockref(const char *function, unsigned int line, while (bref < p+max) { blk = le32_to_cpu(*bref++); if (blk && - unlikely(!ext4_inode_block_valid(inode, blk, 1))) { + unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), + blk, 1))) { es->s_last_error_block = cpu_to_le64(blk); ext4_error_inode(inode, function, line, blk, "invalid block"); diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index df7014749be2..3dc54352c9e7 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -524,7 +524,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx) struct dir_private_info *info = file->private_data; struct inode *inode = file_inode(file); struct fname *fname; - int ret = 0; + int ret; if (!info) { info = ext4_htree_create_dir_info(file, ctx->pos); @@ -572,7 +572,7 @@ static int 
ext4_dx_readdir(struct file *file, struct dir_context *ctx) info->curr_minor_hash, &info->next_hash); if (ret < 0) - goto finished; + return ret; if (ret == 0) { ctx->pos = ext4_get_htree_eof(file); break; @@ -603,7 +603,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx) } finished: info->last_pos = ctx->pos; - return ret < 0 ? ret : 0; + return 0; } static int ext4_dir_open(struct inode * inode, struct file * filp) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 969beec3ff7e..7be7f5459e80 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -3163,9 +3163,9 @@ extern void ext4_release_system_zone(struct super_block *sb); extern int ext4_setup_system_zone(struct super_block *sb); extern int __init ext4_init_system_zone(void); extern void ext4_exit_system_zone(void); -extern int ext4_inode_block_valid(struct inode *inode, - ext4_fsblk_t start_blk, - unsigned int count); +extern int ext4_data_block_valid(struct ext4_sb_info *sbi, + ext4_fsblk_t start_blk, + unsigned int count); extern int ext4_check_blockref(const char *, unsigned int, struct inode *, __le32 *, unsigned int); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 4feac4bf90f4..4e692393607e 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -384,7 +384,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) */ if (lblock + len <= lblock) return 0; - return ext4_inode_block_valid(inode, block, len); + return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); } static int ext4_valid_extent_idx(struct inode *inode, @@ -392,7 +392,7 @@ static int ext4_valid_extent_idx(struct inode *inode, { ext4_fsblk_t block = ext4_idx_pblock(ext_idx); - return ext4_inode_block_valid(inode, block, 1); + return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1); } static int ext4_valid_extent_entries(struct inode *inode, @@ -549,10 +549,14 @@ __read_extent_tree_block(const char *function, unsigned int line, } if (buffer_verified(bh) && !(flags & 
EXT4_EX_FORCE_CACHE)) return bh; - err = __ext4_ext_check(function, line, inode, - ext_block_hdr(bh), depth, pblk); - if (err) - goto errout; + if (!ext4_has_feature_journal(inode->i_sb) || + (inode->i_ino != + le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) { + err = __ext4_ext_check(function, line, inode, + ext_block_hdr(bh), depth, pblk); + if (err) + goto errout; + } set_buffer_verified(bh); /* * If this is a leaf block, cache all of its entries @@ -865,7 +869,6 @@ int ext4_ext_tree_init(handle_t *handle, struct inode *inode) eh->eh_entries = 0; eh->eh_magic = EXT4_EXT_MAGIC; eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0)); - eh->eh_generation = 0; ext4_mark_inode_dirty(handle, inode); return 0; } @@ -1129,7 +1132,6 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); neh->eh_magic = EXT4_EXT_MAGIC; neh->eh_depth = 0; - neh->eh_generation = 0; /* move remainder of path[depth] to the new leaf */ if (unlikely(path[depth].p_hdr->eh_entries != @@ -1207,7 +1209,6 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, neh->eh_magic = EXT4_EXT_MAGIC; neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); neh->eh_depth = cpu_to_le16(depth - i); - neh->eh_generation = 0; fidx = EXT_FIRST_INDEX(neh); fidx->ei_block = border; ext4_idx_store_pblock(fidx, oldblock); @@ -3263,10 +3264,7 @@ static int ext4_split_extent_at(handle_t *handle, ext4_ext_mark_unwritten(ex2); err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); - if (err != -ENOSPC && err != -EDQUOT) - goto out; - - if (EXT4_EXT_MAY_ZEROOUT & split_flag) { + if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { if (split_flag & EXT4_EXT_DATA_VALID1) { err = ext4_ext_zeroout(inode, ex2); @@ -3292,30 +3290,30 @@ static int ext4_split_extent_at(handle_t *handle, ext4_ext_pblock(&orig_ex)); } - if (!err) { - /* update the 
extent length and mark as initialized */ - ex->ee_len = cpu_to_le16(ee_len); - ext4_ext_try_to_merge(handle, inode, path, ex); - err = ext4_ext_dirty(handle, inode, path + path->p_depth); - if (!err) - /* update extent status tree */ - err = ext4_zeroout_es(inode, &zero_ex); - /* If we failed at this point, we don't know in which - * state the extent tree exactly is so don't try to fix - * length of the original extent as it may do even more - * damage. - */ - goto out; - } - } + if (err) + goto fix_extent_len; + /* update the extent length and mark as initialized */ + ex->ee_len = cpu_to_le16(ee_len); + ext4_ext_try_to_merge(handle, inode, path, ex); + err = ext4_ext_dirty(handle, inode, path + path->p_depth); + if (err) + goto fix_extent_len; + + /* update extent status tree */ + err = ext4_zeroout_es(inode, &zero_ex); + + goto out; + } else if (err) + goto fix_extent_len; + +out: + ext4_ext_show_leaf(inode, path); + return err; fix_extent_len: ex->ee_len = orig_ex.ee_len; ext4_ext_dirty(handle, inode, path + path->p_depth); return err; -out: - ext4_ext_show_leaf(inode, path); - return err; } /* diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 665cf30c95e9..ac748b3af1c1 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -1080,9 +1080,11 @@ static unsigned long ext4_es_scan(struct shrinker *shrink, ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret); + if (!nr_to_scan) + return ret; + nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL); - ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret); return nr_shrunk; } diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 685a26e9540f..3753a8a05a2e 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -405,7 +405,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g, * * We always try to spread first-level 
directories. * - * If there are blockgroups with both free inodes and free clusters counts + * If there are blockgroups with both free inodes and free blocks counts * not worse than average we return one with smallest directory count. * Otherwise we simply return a random group. * @@ -414,7 +414,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g, * It's OK to put directory into a group unless * it has too many directories already (max_dirs) or * it has too few free inodes left (min_inodes) or - * it has too few free clusters left (min_clusters) or + * it has too few free blocks left (min_blocks) or * Parent's group is preferred, if it doesn't satisfy these * conditions we search cyclically through the rest. If none * of the groups look good we just look for a group with more @@ -430,7 +430,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent, ext4_group_t real_ngroups = ext4_get_groups_count(sb); int inodes_per_group = EXT4_INODES_PER_GROUP(sb); unsigned int freei, avefreei, grp_free; - ext4_fsblk_t freec, avefreec; + ext4_fsblk_t freeb, avefreec; unsigned int ndirs; int max_dirs, min_inodes; ext4_grpblk_t min_clusters; @@ -449,8 +449,9 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent, freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter); avefreei = freei / ngroups; - freec = percpu_counter_read_positive(&sbi->s_freeclusters_counter); - avefreec = freec; + freeb = EXT4_C2B(sbi, + percpu_counter_read_positive(&sbi->s_freeclusters_counter)); + avefreec = freeb; do_div(avefreec, ngroups); ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter); @@ -1277,7 +1278,6 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, handle_t *handle; ext4_fsblk_t blk; int num, ret = 0, used_blks = 0; - unsigned long used_inos = 0; /* This should not happen, but just to be sure check this */ if (sb->s_flags & MS_RDONLY) { @@ -1308,37 +1308,22 @@ int ext4_init_inode_table(struct 
super_block *sb, ext4_group_t group, * used inodes so we need to skip blocks with used inodes in * inode table. */ - if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) { - used_inos = EXT4_INODES_PER_GROUP(sb) - - ext4_itable_unused_count(sb, gdp); - used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block); - - /* Bogus inode unused count? */ - if (used_blks < 0 || used_blks > sbi->s_itb_per_group) { - ext4_error(sb, "Something is wrong with group %u: " - "used itable blocks: %d; " - "itable unused count: %u", - group, used_blks, - ext4_itable_unused_count(sb, gdp)); - ret = 1; - goto err_out; - } - - used_inos += group * EXT4_INODES_PER_GROUP(sb); - /* - * Are there some uninitialized inodes in the inode table - * before the first normal inode? - */ - if ((used_blks != sbi->s_itb_per_group) && - (used_inos < EXT4_FIRST_INO(sb))) { - ext4_error(sb, "Something is wrong with group %u: " - "itable unused count: %u; " - "itables initialized count: %ld", - group, ext4_itable_unused_count(sb, gdp), - used_inos); - ret = 1; - goto err_out; - } + if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) + used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) - + ext4_itable_unused_count(sb, gdp)), + sbi->s_inodes_per_block); + + if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) || + ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) - + ext4_itable_unused_count(sb, gdp)) < + EXT4_FIRST_INO(sb)))) { + ext4_error(sb, "Something is wrong with group %u: " + "used itable blocks: %d; " + "itable unused count: %u", + group, used_blks, + ext4_itable_unused_count(sb, gdp)); + ret = 1; + goto err_out; } blk = ext4_inode_table(sb, gdp) + used_blks; diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 4f610cd8041b..08f3a0c0f468 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -946,7 +946,8 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode, else if (ext4_should_journal_data(inode)) flags |= EXT4_FREE_BLOCKS_FORGET; - if 
(!ext4_inode_block_valid(inode, block_to_free, count)) { + if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, + count)) { EXT4_ERROR_INODE(inode, "attempt to clear invalid " "blocks %llu len %lu", (unsigned long long) block_to_free, count); @@ -1108,7 +1109,8 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, if (!nr) continue; /* A hole */ - if (!ext4_inode_block_valid(inode, nr, 1)) { + if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), + nr, 1)) { EXT4_ERROR_INODE(inode, "invalid indirect mapped " "block %lu (level %d)", diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 15c2a23fba75..1016a8ddd3b0 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -760,12 +760,6 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, ext4_write_lock_xattr(inode, &no_expand); BUG_ON(!ext4_has_inline_data(inode)); - /* - * ei->i_inline_off may have changed since ext4_write_begin() - * called ext4_try_to_write_inline_data() - */ - (void) ext4_find_inline_data_nolock(inode); - kaddr = kmap_atomic(page); ext4_write_inline_data(inode, &iloc, kaddr, pos, len); kunmap_atomic(kaddr); @@ -1136,15 +1130,7 @@ static void ext4_restore_inline_data(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc, void *buf, int inline_size) { - int ret; - - ret = ext4_create_inline_data(handle, inode, inline_size); - if (ret) { - ext4_msg(inode->i_sb, KERN_EMERG, - "error restoring inline_data for inode -- potential data loss! 
(inode %lu, error %d)", - inode->i_ino, ret); - return; - } + ext4_create_inline_data(handle, inode, inline_size); ext4_write_inline_data(inode, iloc, buf, 0, inline_size); ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 4fbede7d7e2f..e269e518f6c0 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -383,7 +383,8 @@ static int __check_block_validity(struct inode *inode, const char *func, (inode->i_ino == le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) return 0; - if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) { + if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, + map->m_len)) { ext4_error_inode(inode, func, line, map->m_pblk, "lblock %lu mapped to illegal pblock %llu " "(length %d)", (unsigned long) map->m_lblk, @@ -1854,13 +1855,13 @@ static int __ext4_journalled_writepage(struct page *page, if (!ret) ret = err; + if (!ext4_has_inline_data(inode)) + ext4_walk_page_buffers(NULL, page_bufs, 0, len, + NULL, bput_one); ext4_set_inode_state(inode, EXT4_STATE_JDATA); out: unlock_page(page); out_no_pagelock: - if (!inline_data && page_bufs) - ext4_walk_page_buffers(NULL, page_bufs, 0, len, - NULL, bput_one); brelse(inode_bh); return ret; } @@ -4512,7 +4513,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, ret = 0; if (ei->i_file_acl && - !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) { + !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { ext4_error_inode(inode, function, line, 0, "iget: bad extended attribute block %llu", ei->i_file_acl); @@ -4702,7 +4703,7 @@ static int ext4_do_update_inode(handle_t *handle, struct ext4_inode_info *ei = EXT4_I(inode); struct buffer_head *bh = iloc->bh; struct super_block *sb = inode->i_sb; - int err = 0, block; + int err = 0, rc, block; int need_datasync = 0, set_large_file = 0; uid_t i_uid; gid_t i_gid; @@ -4802,9 +4803,9 @@ static int ext4_do_update_inode(handle_t *handle, bh->b_data); 
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); - err = ext4_handle_dirty_metadata(handle, NULL, bh); - if (err) - goto out_brelse; + rc = ext4_handle_dirty_metadata(handle, NULL, bh); + if (!err) + err = rc; ext4_clear_inode_state(inode, EXT4_STATE_NEW); if (set_large_file) { BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access"); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index fb750dc13830..aca311a57abd 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -615,6 +615,8 @@ resizefs_out: sizeof(range))) return -EFAULT; + range.minlen = max((unsigned int)range.minlen, + q->limits.discard_granularity); ret = ext4_trim_fs(sb, &range, flags); if (ret < 0) return ret; @@ -675,10 +677,7 @@ encryption_policy_out: err = ext4_journal_get_write_access(handle, sbi->s_sbh); if (err) goto pwsalt_err_journal; - lock_buffer(sbi->s_sbh); generate_random_uuid(sbi->s_es->s_encrypt_pw_salt); - ext4_superblock_csum_set(sb); - unlock_buffer(sbi->s_sbh); err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); pwsalt_err_journal: diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index c15cb259c9ea..c80223bde667 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2961,7 +2961,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); - if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { + if (!ext4_data_block_valid(sbi, block, len)) { ext4_error(sb, "Allocating blocks %llu-%llu which overlap " "fs metadata", block, block+len); /* File system mounted not to panic on error @@ -4647,7 +4647,6 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, cluster), "Block already on to-be-freed list"); - kmem_cache_free(ext4_free_data_cachep, new_entry); return 0; } } @@ -4719,7 +4718,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, sbi = EXT4_SB(sb); if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 
- !ext4_inode_block_valid(inode, block, count)) { + !ext4_data_block_valid(sbi, block, count)) { ext4_error(sb, "Freeing blocks not in datazone - " "block = %llu, count = %lu", block, count); goto error_return; @@ -5231,7 +5230,6 @@ out: int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range, unsigned long blkdev_flags) { - struct request_queue *q = bdev_get_queue(sb->s_bdev); struct ext4_group_info *grp; ext4_group_t group, first_group, last_group; ext4_grpblk_t cnt = 0, first_cluster, last_cluster; @@ -5250,13 +5248,6 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range, start >= max_blks || range->len < sb->s_blocksize) return -EINVAL; - /* No point to try to trim less than discard granularity */ - if (range->minlen < q->limits.discard_granularity) { - minlen = EXT4_NUM_B2C(EXT4_SB(sb), - q->limits.discard_granularity >> sb->s_blocksize_bits); - if (minlen > EXT4_CLUSTERS_PER_GROUP(sb)) - goto out; - } if (end >= max_blks) end = max_blks - 1; if (end <= first_data_blk) diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 1073e24ab622..a4651894cc33 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -459,12 +459,12 @@ int ext4_ext_migrate(struct inode *inode) return retval; /* - * Worst case we can touch the allocation bitmaps and a block - * group descriptor block. We do need need to worry about - * credits for modifying the quota inode. + * Worst case we can touch the allocation bitmaps, a bgd + * block, and a block to link in the orphan list. We do need + * need to worry about credits for modifying the quota inode. */ handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, - 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb)); + 4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb)); if (IS_ERR(handle)) { retval = PTR_ERR(handle); @@ -481,13 +481,6 @@ int ext4_ext_migrate(struct inode *inode) ext4_journal_stop(handle); return retval; } - /* - * Use the correct seed for checksum (i.e. the seed from 'inode'). 
This - * is so that the metadata blocks will have the correct checksum after - * the migration. - */ - ei = EXT4_I(inode); - EXT4_I(tmp_inode)->i_csum_seed = ei->i_csum_seed; i_size_write(tmp_inode, i_size_read(inode)); /* * Set the i_nlink to zero so it will be deleted later @@ -496,6 +489,7 @@ int ext4_ext_migrate(struct inode *inode) clear_nlink(tmp_inode); ext4_ext_tree_init(handle, tmp_inode); + ext4_orphan_add(handle, tmp_inode); ext4_journal_stop(handle); /* @@ -520,10 +514,17 @@ int ext4_ext_migrate(struct inode *inode) handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1); if (IS_ERR(handle)) { + /* + * It is impossible to update on-disk structures without + * a handle, so just rollback in-core changes and live other + * work to orphan_list_cleanup() + */ + ext4_orphan_del(NULL, tmp_inode); retval = PTR_ERR(handle); goto out; } + ei = EXT4_I(inode); i_data = ei->i_data; memset(&lb, 0, sizeof(lb)); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 313a61626d2d..574ca958aa2c 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -3370,35 +3370,12 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent, return retval; } } + brelse(ent->bh); + ent->bh = NULL; return 0; } -static void ext4_resetent(handle_t *handle, struct ext4_renament *ent, - unsigned ino, unsigned file_type) -{ - struct ext4_renament old = *ent; - int retval = 0; - - /* - * old->de could have moved from under us during make indexed dir, - * so the old->de may no longer valid and need to find it again - * before reset old inode info. 
- */ - old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL); - if (IS_ERR(old.bh)) - retval = PTR_ERR(old.bh); - if (!old.bh) - retval = -ENOENT; - if (retval) { - ext4_std_error(old.dir->i_sb, retval); - return; - } - - ext4_setent(handle, &old, ino, file_type); - brelse(old.bh); -} - static int ext4_find_delete_entry(handle_t *handle, struct inode *dir, const struct qstr *d_name) { @@ -3553,14 +3530,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, */ retval = -ENOENT; if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino) - goto release_bh; + goto end_rename; if ((old.dir != new.dir) && ext4_encrypted_inode(new.dir) && !ext4_is_child_context_consistent_with_parent(new.dir, old.inode)) { retval = -EXDEV; - goto release_bh; + goto end_rename; } new.bh = ext4_find_entry(new.dir, &new.dentry->d_name, @@ -3568,7 +3545,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, if (IS_ERR(new.bh)) { retval = PTR_ERR(new.bh); new.bh = NULL; - goto release_bh; + goto end_rename; } if (new.bh) { if (!new.inode) { @@ -3585,17 +3562,18 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits); if (IS_ERR(handle)) { retval = PTR_ERR(handle); - goto release_bh; + handle = NULL; + goto end_rename; } } else { whiteout = ext4_whiteout_for_rename(&old, credits, &handle); if (IS_ERR(whiteout)) { retval = PTR_ERR(whiteout); - goto release_bh; + whiteout = NULL; + goto end_rename; } } - old_file_type = old.de->file_type; if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) ext4_handle_sync(handle); @@ -3623,6 +3601,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, force_reread = (new.dir->i_ino == old.dir->i_ino && ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA)); + old_file_type = old.de->file_type; if (whiteout) { /* * Do this before adding a new entry, so the old entry is sure @@ -3694,23 +3673,17 @@ 
static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, retval = 0; end_rename: + brelse(old.dir_bh); + brelse(old.bh); + brelse(new.bh); if (whiteout) { - if (retval) { - ext4_resetent(handle, &old, - old.inode->i_ino, old_file_type); + if (retval) drop_nlink(whiteout); - ext4_orphan_add(handle, whiteout); - } unlock_new_inode(whiteout); - ext4_journal_stop(handle); iput(whiteout); - } else { - ext4_journal_stop(handle); } -release_bh: - brelse(old.dir_bh); - brelse(old.bh); - brelse(new.bh); + if (handle) + ext4_journal_stop(handle); return retval; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 6d3b72c959c8..089e4c4c2df5 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4497,10 +4497,8 @@ static int ext4_commit_super(struct super_block *sb, int sync) struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; int error = 0; - if (!sbh) - return -EINVAL; - if (block_device_ejected(sb)) - return -ENODEV; + if (!sbh || block_device_ejected(sb)) + return error; /* * The superblock bh should be mapped, but it might not be if the @@ -4973,10 +4971,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) ext4_register_li_request(sb, first_not_zeroed); } - err = ext4_setup_system_zone(sb); - if (err) - goto restore_opts; - + ext4_setup_system_zone(sb); if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY)) ext4_commit_super(sb, 1); @@ -5370,7 +5365,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, struct buffer_head *bh; handle_t *handle = journal_current_handle(); - if (!handle) { + if (EXT4_SB(sb)->s_journal && !handle) { ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because transaction is not started", (unsigned long long)off, (unsigned long long)len); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index aa8a31be2eb2..a714f430675c 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -759,8 +759,7 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr) if 
(ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; - if (!in_group_p(inode->i_gid) && - !capable_wrt_inode_uidgid(inode, CAP_FSETID)) + if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) mode &= ~S_ISGID; set_acl_inode(inode, mode); } diff --git a/fs/file.c b/fs/file.c index 3ab8eefa7077..090015401c55 100644 --- a/fs/file.c +++ b/fs/file.c @@ -691,88 +691,38 @@ void do_close_on_exec(struct files_struct *files) spin_unlock(&files->file_lock); } -static inline struct file *__fget_files_rcu(struct files_struct *files, - unsigned int fd, fmode_t mask, unsigned int refs) -{ - for (;;) { - struct file *file; - struct fdtable *fdt = rcu_dereference_raw(files->fdt); - struct file __rcu **fdentry; - - if (unlikely(fd >= fdt->max_fds)) - return NULL; - - fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds); - file = rcu_dereference_raw(*fdentry); - if (unlikely(!file)) - return NULL; - - if (unlikely(file->f_mode & mask)) - return NULL; - - /* - * Ok, we have a file pointer. However, because we do - * this all locklessly under RCU, we may be racing with - * that file being closed. - * - * Such a race can take two forms: - * - * (a) the file ref already went down to zero, - * and get_file_rcu_many() fails. Just try - * again: - */ - if (unlikely(!get_file_rcu_many(file, refs))) - continue; - - /* - * (b) the file table entry has changed under us. - * Note that we don't need to re-check the 'fdt->fd' - * pointer having changed, because it always goes - * hand-in-hand with 'fdt'. - * - * If so, we need to put our refs and try again. - */ - if (unlikely(rcu_dereference_raw(files->fdt) != fdt) || - unlikely(rcu_dereference_raw(*fdentry) != file)) { - fput_many(file, refs); - continue; - } - - /* - * Ok, we have a ref to the file, and checked that it - * still exists. 
- */ - return file; - } -} - - -static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs) +static struct file *__fget(unsigned int fd, fmode_t mask) { struct files_struct *files = current->files; struct file *file; rcu_read_lock(); - file = __fget_files_rcu(files, fd, mask, refs); +loop: + file = fcheck_files(files, fd); + if (file) { + /* File object ref couldn't be taken. + * dup2() atomicity guarantee is the reason + * we loop to catch the new file (or NULL pointer) + */ + if (file->f_mode & mask) + file = NULL; + else if (!get_file_rcu(file)) + goto loop; + } rcu_read_unlock(); return file; } -struct file *fget_many(unsigned int fd, unsigned int refs) -{ - return __fget(fd, FMODE_PATH, refs); -} - struct file *fget(unsigned int fd) { - return __fget(fd, FMODE_PATH, 1); + return __fget(fd, FMODE_PATH); } EXPORT_SYMBOL(fget); struct file *fget_raw(unsigned int fd) { - return __fget(fd, 0, 1); + return __fget(fd, 0); } EXPORT_SYMBOL(fget_raw); @@ -803,7 +753,7 @@ static unsigned long __fget_light(unsigned int fd, fmode_t mask) return 0; return (unsigned long)file; } else { - file = __fget(fd, mask, 1); + file = __fget(fd, mask); if (!file) return 0; return FDPUT_FPUT | (unsigned long)file; diff --git a/fs/file_table.c b/fs/file_table.c index 89dc9d4b3555..b4baa0de4988 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -397,9 +397,9 @@ void flush_delayed_fput(void) static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput); -void fput_many(struct file *file, unsigned int refs) +void fput(struct file *file) { - if (atomic_long_sub_and_test(refs, &file->f_count)) { + if (atomic_long_dec_and_test(&file->f_count)) { struct task_struct *task = current; if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) { @@ -418,11 +418,6 @@ void fput_many(struct file *file, unsigned int refs) } } -void fput(struct file *file) -{ - fput_many(file, 1); -} - /* * synchronous analog of fput(); for kernel threads that might be needed * in some umount() (and 
thus can't use flush_delayed_fput() without diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 0ce7ff7a2ce8..516f7eb36a73 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -512,14 +512,9 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id) /* find and pin the new wb */ rcu_read_lock(); memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys); - if (memcg_css && !css_tryget(memcg_css)) - memcg_css = NULL; + if (memcg_css) + isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); rcu_read_unlock(); - if (!memcg_css) - goto out_free; - - isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC); - css_put(memcg_css); if (!isw->new_wb) goto out_free; @@ -1934,7 +1929,7 @@ void wb_workfn(struct work_struct *work) struct bdi_writeback, dwork); long pages_written; - set_worker_desc("flush-%s", bdi_dev_name(wb->bdi)); + set_worker_desc("flush-%s", dev_name(wb->bdi->dev)); current->flags |= PF_SWAPWRITE; if (likely(!current_is_workqueue_rescuer() || @@ -2045,6 +2040,28 @@ int dirtytime_interval_handler(struct ctl_table *table, int write, return ret; } +static noinline void block_dump___mark_inode_dirty(struct inode *inode) +{ + if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { + struct dentry *dentry; + const char *name = "?"; + + dentry = d_find_alias(inode); + if (dentry) { + spin_lock(&dentry->d_lock); + name = (const char *) dentry->d_name.name; + } + printk(KERN_DEBUG + "%s(%d): dirtied inode %lu (%s) on %s\n", + current->comm, task_pid_nr(current), inode->i_ino, + name, inode->i_sb->s_id); + if (dentry) { + spin_unlock(&dentry->d_lock); + dput(dentry); + } + } +} + /** * __mark_inode_dirty - internal function * @inode: inode to mark @@ -2103,6 +2120,9 @@ void __mark_inode_dirty(struct inode *inode, int flags) (dirtytime && (inode->i_state & I_DIRTY_INODE))) return; + if (unlikely(block_dump > 1)) + block_dump___mark_inode_dirty(inode); + spin_lock(&inode->i_lock); if (dirtytime && (inode->i_state & I_DIRTY_INODE)) goto 
out_unlock_inode; diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c index b83367300f48..d9aba9700726 100644 --- a/fs/fuse/cuse.c +++ b/fs/fuse/cuse.c @@ -616,8 +616,6 @@ static int __init cuse_init(void) cuse_channel_fops.owner = THIS_MODULE; cuse_channel_fops.open = cuse_channel_open; cuse_channel_fops.release = cuse_channel_release; - /* CUSE is not prepared for FUSE_DEV_IOC_CLONE */ - cuse_channel_fops.unlocked_ioctl = NULL; cuse_class = class_create(THIS_MODULE, "cuse"); if (IS_ERR(cuse_class)) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index fc55909ce515..aaf514655fed 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -937,13 +937,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) if (!(buf->flags & PIPE_BUF_FLAG_LRU)) lru_cache_add_file(newpage); - /* - * Release while we have extra ref on stolen page. Otherwise - * anon_pipe_buf_release() might think the page can be reused. - */ - buf->ops->release(cs->pipe, buf); - buf->ops = NULL; - err = 0; spin_lock(&cs->req->waitq.lock); if (test_bit(FR_ABORTED, &cs->req->flags)) @@ -1332,15 +1325,6 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file, goto restart; } spin_lock(&fpq->lock); - /* - * Must not put request on fpq->io queue after having been shut down by - * fuse_abort_conn() - */ - if (!fpq->connected) { - req->out.h.error = err = -ECONNABORTED; - goto out_end; - - } list_add(&req->list, &fpq->io); spin_unlock(&fpq->lock); cs->req = req; @@ -1948,7 +1932,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud, } err = -EINVAL; - if (oh.error <= -512 || oh.error > 0) + if (oh.error <= -1000 || oh.error > 0) goto err_finish; spin_lock(&fpq->lock); @@ -2114,8 +2098,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, out_free: for (idx = 0; idx < nbuf; idx++) { struct pipe_buffer *buf = &bufs[idx]; - if (buf->ops) - buf->ops->release(pipe, buf); + buf->ops->release(pipe, buf); } pipe_unlock(pipe); diff --git a/fs/fuse/fuse_i.h 
b/fs/fuse/fuse_i.h index 2aa6f983ca44..4845d6739669 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -276,7 +276,7 @@ struct fuse_io_priv { #define FUSE_IO_PRIV_SYNC(f) \ { \ - .refcnt = KREF_INIT(1), \ + .refcnt = { ATOMIC_INIT(1) }, \ .async = 0, \ .file = f, \ } diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index f115ce93dfb4..8e8695eb652a 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1342,7 +1342,6 @@ __acquires(&lru_lock) while(!list_empty(list)) { gl = list_entry(list->next, struct gfs2_glock, gl_lru); list_del_init(&gl->gl_lru); - clear_bit(GLF_LRU, &gl->gl_flags); if (!spin_trylock(&gl->gl_lockref.lock)) { add_back_to_lru: list_add(&gl->gl_lru, &lru_list); @@ -1389,6 +1388,7 @@ static long gfs2_scan_glock_lru(int nr) if (!test_bit(GLF_LOCK, &gl->gl_flags)) { list_move(&gl->gl_lru, &dispose); atomic_dec(&lru_count); + clear_bit(GLF_LRU, &gl->gl_flags); freed++; continue; } diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index da9f97911852..3c3d037df824 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -284,6 +284,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct lm_lockstruct *ls = &sdp->sd_lockstruct; + int lvb_needs_unlock = 0; int error; if (gl->gl_lksb.sb_lkid == 0) { @@ -296,15 +297,13 @@ static void gdlm_put_lock(struct gfs2_glock *gl) gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); gfs2_update_request_times(gl); - /* don't want to call dlm if we've unmounted the lock protocol */ - if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) { - gfs2_glock_free(gl); - return; - } - /* don't want to skip dlm_unlock writing the lvb when lock has one */ + /* don't want to skip dlm_unlock writing the lvb when lock is ex */ + + if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE)) + lvb_needs_unlock = 1; if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) && - !gl->gl_lksb.sb_lvbptr) { + !lvb_needs_unlock) { gfs2_glock_free(gl); return; } diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c 
index dd0d8c1bf5c5..faa5e0e2c449 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -1017,10 +1017,6 @@ static int gfs2_ri_update(struct gfs2_inode *ip) if (error < 0) return error; - if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) { - fs_err(sdp, "no resource groups found in the file system.\n"); - return -ENOENT; - } set_rgrp_preferences(sdp); sdp->sd_rindex_uptodate = 1; diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c index 7f2ef95dcd05..de69d8a24f6d 100644 --- a/fs/hfs/bfind.c +++ b/fs/hfs/bfind.c @@ -24,19 +24,7 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd) fd->key = ptr + tree->max_key_len + 2; hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0)); - switch (tree->cnid) { - case HFS_CAT_CNID: - mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX); - break; - case HFS_EXT_CNID: - mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX); - break; - case HFS_ATTR_CNID: - mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX); - break; - default: - return -EINVAL; - } + mutex_lock(&tree->tree_lock); return 0; } diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c index 2cda99e61cae..221719eac5de 100644 --- a/fs/hfs/bnode.c +++ b/fs/hfs/bnode.c @@ -14,31 +14,16 @@ #include "btree.h" -void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) +void hfs_bnode_read(struct hfs_bnode *node, void *buf, + int off, int len) { struct page *page; - int pagenum; - int bytes_read; - int bytes_to_read; - void *vaddr; off += node->page_offset; - pagenum = off >> PAGE_SHIFT; - off &= ~PAGE_MASK; /* compute page offset for the first page */ - - for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) { - if (pagenum >= node->tree->pages_per_bnode) - break; - page = node->page[pagenum]; - bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off); - - vaddr = kmap_atomic(page); - memcpy(buf + bytes_read, vaddr + off, bytes_to_read); - kunmap_atomic(vaddr); + page = node->page[0]; - pagenum++; - off = 0; /* page 
offset only applies to the first page */ - } + memcpy(buf, kmap(page) + off, len); + kunmap(page); } u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off) diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h index 308b5f1af65b..2715f416b5a8 100644 --- a/fs/hfs/btree.h +++ b/fs/hfs/btree.h @@ -12,13 +12,6 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *); #define NODE_HASH_SIZE 256 -/* B-tree mutex nested subclasses */ -enum hfs_btree_mutex_classes { - CATALOG_BTREE_MUTEX, - EXTENTS_BTREE_MUTEX, - ATTR_BTREE_MUTEX, -}; - /* A HFS BTree held in memory */ struct hfs_btree { struct super_block *sb; diff --git a/fs/hfs/super.c b/fs/hfs/super.c index 3eb815bb2c78..4574fdd3d421 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -426,12 +426,14 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) if (!res) { if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) { res = -EIO; - goto bail_hfs_find; + goto bail; } hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength); } - if (res) - goto bail_hfs_find; + if (res) { + hfs_find_exit(&fd); + goto bail_no_root; + } res = -EINVAL; root_inode = hfs_iget(sb, &fd.search_key->cat, &rec); hfs_find_exit(&fd); @@ -447,8 +449,6 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) /* everything's okay */ return 0; -bail_hfs_find: - hfs_find_exit(&fd); bail_no_root: pr_err("get root inode failed\n"); bail: diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 1d5e3b0a3b1a..937c6ee1786f 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -414,7 +414,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, if (next >= end) break; - hash = hugetlb_fault_mutex_hash(h, mapping, next); + hash = hugetlb_fault_mutex_hash(h, mapping, next, 0); mutex_lock(&hugetlb_fault_mutex_table[hash]); lock_page(page); @@ -630,7 +630,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, addr = index * hpage_size; /* mutex 
taken here, fault path and hole punch */ - hash = hugetlb_fault_mutex_hash(h, mapping, index); + hash = hugetlb_fault_mutex_hash(h, mapping, index, addr); mutex_lock(&hugetlb_fault_mutex_table[hash]); /* See if already present in mapping to avoid alloc/free */ @@ -661,9 +661,8 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, mutex_unlock(&hugetlb_fault_mutex_table[hash]); - set_page_huge_active(page); /* - * put_page() due to reference from alloc_huge_page() + * page_put due to reference from alloc_huge_page() * unlock_page because locked by add_to_page_cache() */ put_page(page); diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c index 2e7d74c7beed..b943cbd963bb 100644 --- a/fs/isofs/dir.c +++ b/fs/isofs/dir.c @@ -151,7 +151,6 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, printk(KERN_NOTICE "iso9660: Corrupted directory entry" " in block %lu of inode %lu\n", block, inode->i_ino); - brelse(bh); return -EIO; } diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 41ece64f1a34..350f67fb5b9c 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -1268,8 +1268,6 @@ static int isofs_read_inode(struct inode *inode, int relocated) de = (struct iso_directory_record *) (bh->b_data + offset); de_len = *(unsigned char *) de; - if (de_len < sizeof(struct iso_directory_record)) - goto fail; if (offset + de_len > bufsize) { int frag1 = bufsize - offset; diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c index 696f255d1532..7b543e6b6526 100644 --- a/fs/isofs/namei.c +++ b/fs/isofs/namei.c @@ -101,7 +101,6 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry, printk(KERN_NOTICE "iso9660: Corrupted directory entry" " in block %lu of inode %lu\n", block, dir->i_ino); - brelse(bh); return 0; } diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c index 79e771ab624f..406d9cc84ba8 100644 --- a/fs/jffs2/compr_rtime.c +++ b/fs/jffs2/compr_rtime.c @@ -37,9 +37,6 @@ static int jffs2_rtime_compress(unsigned char *data_in, int 
outpos = 0; int pos=0; - if (*dstlen <= 3) - return -1; - memset(positions,0,sizeof(positions)); while (pos < (*sourcelen) && outpos <= (*dstlen)-2) { diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index bee8964682f8..5b52ea41b84f 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c @@ -672,22 +672,6 @@ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_r jffs2_free_full_dirent(fd); return -EIO; } - -#ifdef CONFIG_JFFS2_SUMMARY - /* - * we use CONFIG_JFFS2_SUMMARY because without it, we - * have checked it while mounting - */ - crc = crc32(0, fd->name, rd->nsize); - if (unlikely(crc != je32_to_cpu(rd->name_crc))) { - JFFS2_NOTICE("name CRC failed on dirent node at" - "%#08x: read %#08x,calculated %#08x\n", - ref_offset(ref), je32_to_cpu(rd->node_crc), crc); - jffs2_mark_node_obsolete(c, ref); - jffs2_free_full_dirent(fd); - return 0; - } -#endif } fd->nhash = full_name_hash(fd->name, rd->nsize); diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index 5f90173ae38d..9ad5ba4b299b 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c @@ -1075,7 +1075,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo memcpy(&fd->name, rd->name, checkedlen); fd->name[checkedlen] = 0; - crc = crc32(0, fd->name, checkedlen); + crc = crc32(0, fd->name, rd->nsize); if (crc != je32_to_cpu(rd->name_crc)) { pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", __func__, ofs, je32_to_cpu(rd->name_crc), crc); diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c index c05d6f5f10ec..bc5385471a6e 100644 --- a/fs/jffs2/summary.c +++ b/fs/jffs2/summary.c @@ -783,8 +783,6 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n", je16_to_cpu(temp->u.nodetype)); jffs2_sum_disable_collecting(c->summary); - /* The above call removes the list, nothing more to do */ - goto bail_rwcompat; } else { BUG(); /* unknown 
node in summary information */ } @@ -796,7 +794,6 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock c->summary->sum_num--; } - bail_rwcompat: jffs2_sum_reset_collected(c->summary); diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index b318732a8562..41aa3ca6a6a4 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -160,8 +160,7 @@ void jfs_evict_inode(struct inode *inode) if (test_cflag(COMMIT_Freewmap, inode)) jfs_free_zero_link(inode); - if (JFS_SBI(inode->i_sb)->ipimap) - diFree(inode); + diFree(inode); /* * Free the inode from the quota allocation. diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 9ff510a489cb..2d514c7affc2 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -1669,7 +1669,7 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen) } else if (rc == -ENOSPC) { /* search for next smaller log2 block */ l2nb = BLKSTOL2(nblocks) - 1; - nblocks = 1LL << l2nb; + nblocks = 1 << l2nb; } else { /* Trim any already allocated blocks */ jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n"); diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h index f502a15c6c98..562b9a7e4311 100644 --- a/fs/jfs/jfs_dmap.h +++ b/fs/jfs/jfs_dmap.h @@ -196,7 +196,7 @@ typedef union dmtree { #define dmt_leafidx t1.leafidx #define dmt_height t1.height #define dmt_budmin t1.budmin -#define dmt_stree t2.stree +#define dmt_stree t1.stree /* * on-disk aggregate disk allocation map descriptor. 
diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h index 415bfa90607a..b67d64671bb4 100644 --- a/fs/jfs/jfs_filsys.h +++ b/fs/jfs/jfs_filsys.h @@ -281,6 +281,5 @@ * fsck() must be run to repair */ #define FM_EXTENDFS 0x00000008 /* file system extendfs() in progress */ -#define FM_STATE_MAX 0x0000000f /* max value of s_state */ #endif /* _H_JFS_FILSYS */ diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index d19542a88c2c..a69bdf2a1085 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -1339,7 +1339,6 @@ int lmLogInit(struct jfs_log * log) } else { if (memcmp(logsuper->uuid, log->uuid, 16)) { jfs_warn("wrong uuid on JFS log device"); - rc = -EINVAL; goto errout20; } log->size = le32_to_cpu(logsuper->size); diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c index 0c2aabba1fdb..9895595fd2f2 100644 --- a/fs/jfs/jfs_mount.c +++ b/fs/jfs/jfs_mount.c @@ -49,7 +49,6 @@ #include #include -#include #include "jfs_incore.h" #include "jfs_filsys.h" @@ -93,14 +92,14 @@ int jfs_mount(struct super_block *sb) * (initialize mount inode from the superblock) */ if ((rc = chkSuper(sb))) { - goto out; + goto errout20; } ipaimap = diReadSpecial(sb, AGGREGATE_I, 0); if (ipaimap == NULL) { jfs_err("jfs_mount: Failed to read AGGREGATE_I"); rc = -EIO; - goto out; + goto errout20; } sbi->ipaimap = ipaimap; @@ -111,7 +110,7 @@ int jfs_mount(struct super_block *sb) */ if ((rc = diMount(ipaimap))) { jfs_err("jfs_mount: diMount(ipaimap) failed w/rc = %d", rc); - goto err_ipaimap; + goto errout21; } /* @@ -120,7 +119,7 @@ int jfs_mount(struct super_block *sb) ipbmap = diReadSpecial(sb, BMAP_I, 0); if (ipbmap == NULL) { rc = -EIO; - goto err_umount_ipaimap; + goto errout22; } jfs_info("jfs_mount: ipbmap:0x%p", ipbmap); @@ -132,7 +131,7 @@ int jfs_mount(struct super_block *sb) */ if ((rc = dbMount(ipbmap))) { jfs_err("jfs_mount: dbMount failed w/rc = %d", rc); - goto err_ipbmap; + goto errout22; } /* @@ -151,7 +150,7 @@ int jfs_mount(struct super_block *sb) if (!ipaimap2) { 
jfs_err("jfs_mount: Failed to read AGGREGATE_I"); rc = -EIO; - goto err_umount_ipbmap; + goto errout35; } sbi->ipaimap2 = ipaimap2; @@ -163,7 +162,7 @@ int jfs_mount(struct super_block *sb) if ((rc = diMount(ipaimap2))) { jfs_err("jfs_mount: diMount(ipaimap2) failed, rc = %d", rc); - goto err_ipaimap2; + goto errout35; } } else /* Secondary aggregate inode table is not valid */ @@ -180,7 +179,7 @@ int jfs_mount(struct super_block *sb) jfs_err("jfs_mount: Failed to read FILESYSTEM_I"); /* open fileset secondary inode allocation map */ rc = -EIO; - goto err_umount_ipaimap2; + goto errout40; } jfs_info("jfs_mount: ipimap:0x%p", ipimap); @@ -190,34 +189,41 @@ int jfs_mount(struct super_block *sb) /* initialize fileset inode allocation map */ if ((rc = diMount(ipimap))) { jfs_err("jfs_mount: diMount failed w/rc = %d", rc); - goto err_ipimap; + goto errout41; } - return rc; + goto out; /* * unwind on error */ -err_ipimap: - /* close fileset inode allocation map inode */ + errout41: /* close fileset inode allocation map inode */ diFreeSpecial(ipimap); -err_umount_ipaimap2: + + errout40: /* fileset closed */ + /* close secondary aggregate inode allocation map */ - if (ipaimap2) + if (ipaimap2) { diUnmount(ipaimap2, 1); -err_ipaimap2: - /* close aggregate inodes */ - if (ipaimap2) diFreeSpecial(ipaimap2); -err_umount_ipbmap: /* close aggregate block allocation map */ + } + + errout35: + + /* close aggregate block allocation map */ dbUnmount(ipbmap, 1); -err_ipbmap: /* close aggregate inodes */ diFreeSpecial(ipbmap); -err_umount_ipaimap: /* close aggregate inode allocation map */ + + errout22: /* close aggregate inode allocation map */ + diUnmount(ipaimap, 1); -err_ipaimap: /* close aggregate inodes */ + + errout21: /* close aggregate inodes */ diFreeSpecial(ipaimap); -out: + errout20: /* aggregate closed */ + + out: + if (rc) jfs_err("Mount JFS Failure: %d", rc); @@ -372,15 +378,6 @@ static int chkSuper(struct super_block *sb) sbi->bsize = bsize; sbi->l2bsize = 
le16_to_cpu(j_sb->s_l2bsize); - /* check some fields for possible corruption */ - if (sbi->l2bsize != ilog2((u32)bsize) || - j_sb->pad != 0 || - le32_to_cpu(j_sb->s_state) > FM_STATE_MAX) { - rc = -EINVAL; - jfs_err("jfs_mount: Mount Failure: superblock is corrupt!"); - goto out; - } - /* * For now, ignore s_pbsize, l2bfactor. All I/O going through buffer * cache. diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 603fa652b965..c7eb47f2fb6c 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -430,7 +430,12 @@ nlm_bind_host(struct nlm_host *host) * RPC rebind is required */ if ((clnt = host->h_rpcclnt) != NULL) { - nlm_rebind_host(host); + if (time_after_eq(jiffies, host->h_nextrebind)) { + rpc_force_rebind(clnt); + host->h_nextrebind = jiffies + NLM_HOST_REBIND; + dprintk("lockd: next rebind in %lu jiffies\n", + host->h_nextrebind - jiffies); + } } else { unsigned long increment = nlmsvc_timeout; struct rpc_timeout timeparms = { @@ -478,20 +483,13 @@ nlm_bind_host(struct nlm_host *host) return clnt; } -/** - * nlm_rebind_host - If needed, force a portmap lookup of the peer's lockd port - * @host: NLM host handle for peer - * - * This is not needed when using a connection-oriented protocol, such as TCP. - * The existing autobind mechanism is sufficient to force a rebind when - * required, e.g. on connection state transitions. 
+/* + * Force a portmap lookup of the remote lockd port */ void nlm_rebind_host(struct nlm_host *host) { - if (host->h_proto != IPPROTO_UDP) - return; - + dprintk("lockd: rebind host %s\n", host->h_name); if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) { rpc_force_rebind(host->h_rpcclnt); host->h_nextrebind = jiffies + NLM_HOST_REBIND; diff --git a/fs/namespace.c b/fs/namespace.c index 58c6f27b141d..796e13b654b8 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1850,20 +1850,6 @@ void drop_collected_mounts(struct vfsmount *mnt) namespace_unlock(); } -static bool has_locked_children(struct mount *mnt, struct dentry *dentry) -{ - struct mount *child; - - list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { - if (!is_subdir(child->mnt_mountpoint, dentry)) - continue; - - if (child->mnt.mnt_flags & MNT_LOCKED) - return true; - } - return false; -} - /** * clone_private_mount - create a private clone of a path * @@ -1878,27 +1864,16 @@ struct vfsmount *clone_private_mount(struct path *path) struct mount *old_mnt = real_mount(path->mnt); struct mount *new_mnt; - down_read(&namespace_sem); if (IS_MNT_UNBINDABLE(old_mnt)) - goto invalid; - - if (!check_mnt(old_mnt)) - goto invalid; - - if (has_locked_children(old_mnt, path->dentry)) - goto invalid; + return ERR_PTR(-EINVAL); + down_read(&namespace_sem); new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); up_read(&namespace_sem); - if (IS_ERR(new_mnt)) return ERR_CAST(new_mnt); return &new_mnt->mnt; - -invalid: - up_read(&namespace_sem); - return ERR_PTR(-EINVAL); } EXPORT_SYMBOL_GPL(clone_private_mount); @@ -2214,6 +2189,19 @@ static int do_change_type(struct path *path, int flag) return err; } +static bool has_locked_children(struct mount *mnt, struct dentry *dentry) +{ + struct mount *child; + list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { + if (!is_subdir(child->mnt_mountpoint, dentry)) + continue; + + if (child->mnt.mnt_flags & MNT_LOCKED) + return true; + } + return false; +} 
+ /* * do loopback mount. */ diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 55ebf9f4a824..c3428767332c 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -132,7 +132,7 @@ config PNFS_OBJLAYOUT config PNFS_FLEXFILE_LAYOUT tristate depends on NFS_V4_1 && NFS_V3 - default NFS_V4 + default m config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN string "NFSv4.1 Implementation ID Domain" diff --git a/fs/nfs/client.c b/fs/nfs/client.c index ba2cd0bd3894..d6d5d2a48e83 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -377,7 +377,7 @@ nfs_get_client(const struct nfs_client_initdata *cl_init, if (cl_init->hostname == NULL) { WARN_ON(1); - return ERR_PTR(-EINVAL); + return NULL; } dprintk("--> nfs_get_client(%s,v%u)\n", diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 3f1ea498ecab..8e268965c96d 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -716,7 +716,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, if (unlikely(!p)) goto out_err; fl->fh_array[i]->size = be32_to_cpup(p++); - if (fl->fh_array[i]->size > NFS_MAXFHSIZE) { + if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) { printk(KERN_ERR "NFS: Too big fh %d received %d\n", i, fl->fh_array[i]->size); goto out_err; diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index e7f8732895b7..17771e157e92 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -86,7 +86,7 @@ static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh) if (unlikely(!p)) return -ENOBUFS; fh->size = be32_to_cpup(p++); - if (fh->size > NFS_MAXFHSIZE) { + if (fh->size > sizeof(struct nfs_fh)) { printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n", fh->size); return -EOVERFLOW; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 0d7b8c6e1de8..d25b55ceb9d5 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1430,10 +1430,10 @@ EXPORT_SYMBOL_GPL(_nfs_display_fhandle); */ static int 
nfs_inode_attrs_need_update(const struct inode *inode, const struct nfs_fattr *fattr) { - unsigned long attr_gencount = NFS_I(inode)->attr_gencount; + const struct nfs_inode *nfsi = NFS_I(inode); - return (long)(fattr->gencount - attr_gencount) > 0 || - (long)(attr_gencount - nfs_read_attr_generation_counter()) > 0; + return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 || + ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0); } /* @@ -1849,7 +1849,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) nfsi->attrtimeo_timestamp = now; } /* Set the barrier to be more recent than this fattr */ - if ((long)(fattr->gencount - nfsi->attr_gencount) > 0) + if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0) nfsi->attr_gencount = fattr->gencount; } @@ -1964,7 +1964,7 @@ static int nfsiod_start(void) { struct workqueue_struct *wq; dprintk("RPC: creating workqueue nfsiod\n"); - wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0); + wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0); if (wq == NULL) return -ENOMEM; nfsiod_workqueue = wq; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 7eeab683a81f..578350fd96e1 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -534,14 +534,12 @@ extern int nfs41_walk_client_list(struct nfs_client *clp, static inline struct inode *nfs_igrab_and_active(struct inode *inode) { - struct super_block *sb = inode->i_sb; - - if (sb && nfs_sb_active(sb)) { - if (igrab(inode)) - return inode; - nfs_sb_deactive(sb); + inode = igrab(inode); + if (inode != NULL && !nfs_sb_active(inode->i_sb)) { + iput(inode); + inode = NULL; } - return NULL; + return inode; } static inline void nfs_iput_and_deactive(struct inode *inode) diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 9f365b004453..cb28cceefebe 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -363,7 +363,7 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, break; case 
NFS3_CREATE_UNCHECKED: - goto out_release_acls; + goto out; } nfs_fattr_init(data->res.dir_attr); nfs_fattr_init(data->res.fattr); @@ -708,7 +708,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr, break; default: status = -EINVAL; - goto out_release_acls; + goto out; } status = nfs3_do_create(dir, dentry, data); diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 4a68837e92ea..267126d32ec0 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -33,7 +33,6 @@ */ #define NFS3_fhandle_sz (1+16) #define NFS3_fh_sz (NFS3_fhandle_sz) /* shorthand */ -#define NFS3_post_op_fh_sz (1+NFS3_fh_sz) #define NFS3_sattr_sz (15) #define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2)) #define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2)) @@ -71,7 +70,7 @@ #define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1) #define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3) #define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4) -#define NFS3_createres_sz (1+NFS3_post_op_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz) +#define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz) #define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz)) #define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz) #define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2) diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 31cc6f3d992d..7f1a0fb8c493 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -168,10 +168,7 @@ static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) if (status) return status; - if (whence == SEEK_DATA && res.sr_eof) - return -NFS4ERR_NXIO; - else - return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes); + return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes); } loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index 988d26202958..0ca482a51e53 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -439,7 +439,8 @@ static int 
nfs4_xdr_dec_clone(struct rpc_rqst *rqstp, status = decode_clone(xdr); if (status) goto out; - decode_getfattr(xdr, res->dst_fattr, res->server); + status = decode_getfattr(xdr, res->dst_fattr, res->server); + out: res->rpc_status = status; return status; diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 85e61756f3d1..92895f41d9a0 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -1218,11 +1218,8 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname, goto out; } - if (server->nfs_client->cl_hostname == NULL) { + if (server->nfs_client->cl_hostname == NULL) server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL); - if (server->nfs_client->cl_hostname == NULL) - return -ENOMEM; - } nfs_server_insert_lists(server); error = nfs_probe_destination(server); diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 9b9c8e598436..c5e884585c23 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -168,7 +168,7 @@ static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) case SEEK_HOLE: case SEEK_DATA: ret = nfs42_proc_llseek(filep, offset, whence); - if (ret != -EOPNOTSUPP) + if (ret != -ENOTSUPP) return ret; default: return nfs_file_llseek(filep, offset, whence); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e10bada12361..64d15c2662db 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -4047,12 +4047,12 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, u64 cookie, struct page **pages, unsigned int count, int plus) { struct inode *dir = d_inode(dentry); - struct nfs_server *server = NFS_SERVER(dir); struct nfs4_readdir_arg args = { .fh = NFS_FH(dir), .pages = pages, .pgbase = 0, .count = count, + .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask, .plus = plus, }; struct nfs4_readdir_res res; @@ -4067,15 +4067,9 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__, dentry, 
(unsigned long long)cookie); - if (!(server->caps & NFS_CAP_SECURITY_LABEL)) - args.bitmask = server->attr_bitmask_nl; - else - args.bitmask = server->attr_bitmask; - nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); res.pgbase = args.pgbase; - status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, - &res.seq_res, 0); + status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); if (status >= 0) { memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); status += args.pgbase; @@ -4848,9 +4842,6 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); int ret, i; - /* You can't remove system.nfs4_acl: */ - if (buflen == 0) - return -EINVAL; if (!nfs4_server_supports_acls(server)) return -EOPNOTSUPP; if (npages > ARRAY_SIZE(pages)) @@ -4887,14 +4878,6 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen do { err = __nfs4_proc_set_acl(inode, buf, buflen); trace_nfs4_set_acl(inode, err); - if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) { - /* - * no need to retry since the kernel - * isn't involved in encoding the ACEs. 
- */ - err = -EINVAL; - break; - } err = nfs4_handle_exception(NFS_SERVER(inode), err, &exception); } while (exception.retry); @@ -4933,7 +4916,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf, return ret; if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) return -ENOENT; - return label.len; + return 0; } static int nfs4_get_security_label(struct inode *inode, void *buf, diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index e4773510eb9a..ef3ed2b1fd27 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1945,9 +1945,6 @@ static int nfs4_try_migration(struct nfs_server *server, struct rpc_cred *cred) } result = -NFS4ERR_NXIO; - if (!locations->nlocations) - goto out; - if (!(locations->fattr.valid & NFS_ATTR_FATTR_V4_LOCATIONS)) { dprintk("<-- %s: No fs_locations data, migration skipped\n", __func__); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index f80d2ef507a5..28c1b765e444 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3628,6 +3628,8 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st if (unlikely(!p)) goto out_overflow; n = be32_to_cpup(p); + if (n <= 0) + goto out_eio; for (res->nlocations = 0; res->nlocations < n; res->nlocations++) { u32 m; struct nfs4_fs_location *loc; @@ -4170,11 +4172,10 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap, } else printk(KERN_WARNING "%s: label too long (%u)!\n", __func__, len); - if (label && label->label) - dprintk("%s: label=%.*s, len=%d, PI=%d, LFS=%d\n", - __func__, label->len, (char *)label->label, - label->len, label->pi, label->lfs); } + if (label && label->label) + dprintk("%s: label=%s, len=%d, PI=%d, LFS=%d\n", __func__, + (char *)label->label, label->len, label->pi, label->lfs); return status; out_overflow: diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 18868e318b03..f5de58c5773f 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -993,16 +993,17 @@ static void 
nfs_pageio_doio(struct nfs_pageio_descriptor *desc) { struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); + if (!list_empty(&mirror->pg_list)) { int error = desc->pg_ops->pg_doio(desc); if (error < 0) desc->pg_error = error; - if (list_empty(&mirror->pg_list)) { + else mirror->pg_bytes_written += mirror->pg_count; - mirror->pg_count = 0; - mirror->pg_base = 0; - mirror->pg_recoalesce = 0; - } + } + if (list_empty(&mirror->pg_list)) { + mirror->pg_count = 0; + mirror->pg_base = 0; } } @@ -1088,6 +1089,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) do { list_splice_init(&mirror->pg_list, &head); + mirror->pg_bytes_written -= mirror->pg_count; mirror->pg_count = 0; mirror->pg_base = 0; mirror->pg_recoalesce = 0; diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c index c21fca0dcba7..77d136ac8909 100644 --- a/fs/nfs_common/grace.c +++ b/fs/nfs_common/grace.c @@ -75,14 +75,10 @@ __state_in_grace(struct net *net, bool open) if (!open) return !list_empty(grace_list); - spin_lock(&grace_lock); list_for_each_entry(lm, grace_list, list) { - if (lm->block_opens) { - spin_unlock(&grace_lock); + if (lm->block_opens) return true; - } } - spin_unlock(&grace_lock); return false; } diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index a94c7a030d63..91146f025769 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -190,11 +190,6 @@ nfsd3_proc_write(struct svc_rqst *rqstp, struct nfsd3_writeargs *argp, (unsigned long long) argp->offset, argp->stable? 
" stable" : ""); - resp->status = nfserr_fbig; - if (argp->offset > (u64)OFFSET_MAX || - argp->offset + argp->len > (u64)OFFSET_MAX) - return rpc_success; - fh_copy(&resp->fh, &argp->fh); resp->committed = argp->stable; nfserr = nfsd_write(rqstp, &resp->fh, NULL, diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index d6c443a874f2..d4fa7fbc37dc 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -821,14 +821,9 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp, if (isdotent(name, namlen)) { if (namlen == 2) { dchild = dget_parent(dparent); - /* - * Don't return filehandle for ".." if we're at - * the filesystem or export root: - */ + /* filesystem root - cannot return filehandle for ".." */ if (dchild == dparent) goto out; - if (dparent == exp->ex_path.dentry) - goto out; } else dchild = dget(dparent); } else diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 2e128d38c818..c67064d94096 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -983,9 +983,8 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, unsigned long cnt; int nvecs; - if (write->wr_offset > (u64)OFFSET_MAX || - write->wr_offset + write->wr_buflen > (u64)OFFSET_MAX) - return nfserr_fbig; + if (write->wr_offset >= OFFSET_MAX) + return nfserr_inval; status = nfs4_preprocess_stateid_op(rqstp, cstate, stateid, WR_STATE, &filp, NULL); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 50bcfcf1ba7b..ea5cb1ba282f 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -844,11 +844,6 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) return 0; } -static bool delegation_hashed(struct nfs4_delegation *dp) -{ - return !(list_empty(&dp->dl_perfile)); -} - static bool unhash_delegation_locked(struct nfs4_delegation *dp) { @@ -856,7 +851,7 @@ unhash_delegation_locked(struct nfs4_delegation *dp) lockdep_assert_held(&state_lock); - if (!delegation_hashed(dp)) + if (list_empty(&dp->dl_perfile)) return false; 
dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID; @@ -3198,10 +3193,8 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, status = nfserr_clid_inuse; if (client_has_state(old) && !same_creds(&unconf->cl_cred, - &old->cl_cred)) { - old = NULL; + &old->cl_cred)) goto out; - } status = mark_client_expired_locked(old); if (status) { old = NULL; @@ -3663,7 +3656,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb) * queued for a lease break. Don't queue it again. */ spin_lock(&state_lock); - if (delegation_hashed(dp) && dp->dl_time == 0) { + if (dp->dl_time == 0) { dp->dl_time = get_seconds(); list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); } diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 87708608c0ff..ee0da259a3d3 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2988,18 +2988,15 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, goto fail; cd->rd_maxcount -= entry_bytes; /* - * RFC 3530 14.2.24 describes rd_dircount as only a "hint", and - * notes that it could be zero. If it is zero, then the server - * should enforce only the rd_maxcount value. 
+ * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so + * let's always let through the first entry, at least: */ - if (cd->rd_dircount) { - name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8; - if (name_and_cookie > cd->rd_dircount && cd->cookie_offset) - goto fail; - cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie); - if (!cd->rd_dircount) - cd->rd_maxcount = 0; - } + if (!cd->rd_dircount) + goto fail; + name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8; + if (name_and_cookie > cd->rd_dircount && cd->cookie_offset) + goto fail; + cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie); cd->cookie_offset = cookie_offset; skip_entry: diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index dfd1949b31ea..0cd57db5c5af 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -768,10 +768,7 @@ out_close: svc_xprt_put(xprt); } out_err: - if (!list_empty(&nn->nfsd_serv->sv_permsocks)) - nn->nfsd_serv->sv_nrthreads--; - else - nfsd_destroy(net); + nfsd_destroy(net); return err; } diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c index 49a148ebbcda..bbb0dcc35905 100644 --- a/fs/nilfs2/sysfs.c +++ b/fs/nilfs2/sysfs.c @@ -73,9 +73,11 @@ static const struct sysfs_ops nilfs_##name##_attr_ops = { \ #define NILFS_DEV_INT_GROUP_TYPE(name, parent_name) \ static void nilfs_##name##_attr_release(struct kobject *kobj) \ { \ - struct nilfs_sysfs_##parent_name##_subgroups *subgroups = container_of(kobj, \ - struct nilfs_sysfs_##parent_name##_subgroups, \ - sg_##name##_kobj); \ + struct nilfs_sysfs_##parent_name##_subgroups *subgroups; \ + struct the_nilfs *nilfs = container_of(kobj->parent, \ + struct the_nilfs, \ + ns_##parent_name##_kobj); \ + subgroups = nilfs->ns_##parent_name##_subgroups; \ complete(&subgroups->sg_##name##_kobj_unregister); \ } \ static struct kobj_type nilfs_##name##_ktype = { \ @@ -101,12 +103,12 @@ static int nilfs_sysfs_create_##name##_group(struct the_nilfs *nilfs) \ err = kobject_init_and_add(kobj, &nilfs_##name##_ktype, parent, \ #name); \ if 
(err) \ - kobject_put(kobj); \ - return err; \ + return err; \ + return 0; \ } \ static void nilfs_sysfs_delete_##name##_group(struct the_nilfs *nilfs) \ { \ - kobject_put(&nilfs->ns_##parent_name##_subgroups->sg_##name##_kobj); \ + kobject_del(&nilfs->ns_##parent_name##_subgroups->sg_##name##_kobj); \ } /************************************************************************ @@ -217,14 +219,14 @@ int nilfs_sysfs_create_snapshot_group(struct nilfs_root *root) } if (err) - kobject_put(&root->snapshot_kobj); + return err; - return err; + return 0; } void nilfs_sysfs_delete_snapshot_group(struct nilfs_root *root) { - kobject_put(&root->snapshot_kobj); + kobject_del(&root->snapshot_kobj); } /************************************************************************ @@ -1006,7 +1008,7 @@ int nilfs_sysfs_create_device_group(struct super_block *sb) err = kobject_init_and_add(&nilfs->ns_dev_kobj, &nilfs_dev_ktype, NULL, "%s", sb->s_id); if (err) - goto cleanup_dev_kobject; + goto free_dev_subgroups; err = nilfs_sysfs_create_mounted_snapshots_group(nilfs); if (err) @@ -1043,7 +1045,9 @@ delete_mounted_snapshots_group: nilfs_sysfs_delete_mounted_snapshots_group(nilfs); cleanup_dev_kobject: - kobject_put(&nilfs->ns_dev_kobj); + kobject_del(&nilfs->ns_dev_kobj); + +free_dev_subgroups: kfree(nilfs->ns_dev_subgroups); failed_create_device_group: @@ -1058,7 +1062,6 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs) nilfs_sysfs_delete_superblock_group(nilfs); nilfs_sysfs_delete_segctor_group(nilfs); kobject_del(&nilfs->ns_dev_kobj); - kobject_put(&nilfs->ns_dev_kobj); kfree(nilfs->ns_dev_subgroups); } diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index 8d4d58b12972..38260c07de8b 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -502,7 +502,7 @@ err_corrupt_attr: } file_name_attr = (FILE_NAME_ATTR*)((u8*)attr + le16_to_cpu(attr->data.resident.value_offset)); - p2 = (u8 *)file_name_attr + le32_to_cpu(attr->data.resident.value_length); + p2 = (u8*)attr + 
le32_to_cpu(attr->data.resident.value_length); if (p2 < (u8*)attr || p2 > p) goto err_corrupt_attr; /* This attribute is ok, but is it in the $Extend directory? */ @@ -661,12 +661,6 @@ static int ntfs_read_locked_inode(struct inode *vi) } a = ctx->attr; /* Get the standard information attribute value. */ - if ((u8 *)a + le16_to_cpu(a->data.resident.value_offset) - + le32_to_cpu(a->data.resident.value_length) > - (u8 *)ctx->mrec + vol->mft_record_size) { - ntfs_error(vi->i_sb, "Corrupt standard information attribute in inode."); - goto unm_err_out; - } si = (STANDARD_INFORMATION*)((u8*)a + le16_to_cpu(a->data.resident.value_offset)); diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 73c12b13fc3e..1d738723a41a 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -490,11 +490,10 @@ int ocfs2_truncate_file(struct inode *inode, * greater than page size, so we have to truncate them * anyway. */ + unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1); + truncate_inode_pages(inode->i_mapping, new_i_size); if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { - unmap_mapping_range(inode->i_mapping, - new_i_size + PAGE_SIZE - 1, 0, 1); - truncate_inode_pages(inode->i_mapping, new_i_size); status = ocfs2_truncate_inline(inode, di_bh, new_i_size, i_size_read(inode), 1); if (status) @@ -513,9 +512,6 @@ int ocfs2_truncate_file(struct inode *inode, goto bail_unlock_sem; } - unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1); - truncate_inode_pages(inode->i_mapping, new_i_size); - status = ocfs2_commit_truncate(osb, inode, di_bh); if (status < 0) { mlog_errno(status); @@ -1536,45 +1532,6 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start, } } -/* - * zero out partial blocks of one cluster. - * - * start: file offset where zero starts, will be made upper block aligned. - * len: it will be trimmed to the end of current cluster if "start + len" - * is bigger than it. 
- */ -static int ocfs2_zeroout_partial_cluster(struct inode *inode, - u64 start, u64 len) -{ - int ret; - u64 start_block, end_block, nr_blocks; - u64 p_block, offset; - u32 cluster, p_cluster, nr_clusters; - struct super_block *sb = inode->i_sb; - u64 end = ocfs2_align_bytes_to_clusters(sb, start); - - if (start + len < end) - end = start + len; - - start_block = ocfs2_blocks_for_bytes(sb, start); - end_block = ocfs2_blocks_for_bytes(sb, end); - nr_blocks = end_block - start_block; - if (!nr_blocks) - return 0; - - cluster = ocfs2_bytes_to_clusters(sb, start); - ret = ocfs2_get_clusters(inode, cluster, &p_cluster, - &nr_clusters, NULL); - if (ret) - return ret; - if (!p_cluster) - return 0; - - offset = start_block - ocfs2_clusters_to_blocks(sb, cluster); - p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset; - return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS); -} - static int ocfs2_zero_partial_clusters(struct inode *inode, u64 start, u64 len) { @@ -1584,7 +1541,6 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); unsigned int csize = osb->s_clustersize; handle_t *handle; - loff_t isize = i_size_read(inode); /* * The "start" and "end" values are NOT necessarily part of @@ -1605,26 +1561,6 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0) goto out; - /* No page cache for EOF blocks, issue zero out to disk. */ - if (end > isize) { - /* - * zeroout eof blocks in last cluster starting from - * "isize" even "start" > "isize" because it is - * complicated to zeroout just at "start" as "start" - * may be not aligned with block size, buffer write - * would be required to do that, but out of eof buffer - * write is not supported. 
- */ - ret = ocfs2_zeroout_partial_cluster(inode, isize, - end - isize); - if (ret) { - mlog_errno(ret); - goto out; - } - if (start >= isize) - goto out; - end = isize; - } handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); @@ -1933,7 +1869,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, { int ret; s64 llen; - loff_t size, orig_isize; + loff_t size; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct buffer_head *di_bh = NULL; handle_t *handle; @@ -2025,15 +1961,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, default: ret = -EINVAL; } - - orig_isize = i_size_read(inode); - /* zeroout eof blocks in the cluster. */ - if (!ret && change_size && orig_isize < size) { - ret = ocfs2_zeroout_partial_cluster(inode, orig_isize, - size - orig_isize); - if (!ret) - i_size_write(inode, size); - } up_write(&OCFS2_I(inode)->ip_alloc_sem); if (ret) { mlog_errno(ret); @@ -2050,6 +1977,9 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, goto out_inode_unlock; } + if (change_size && i_size_read(inode) < size) + i_size_write(inode, size); + inode->i_ctime = inode->i_mtime = CURRENT_TIME; ret = ocfs2_mark_inode_dirty(handle, inode, di_bh); if (ret < 0) diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 337f0628c378..0ee1f088bace 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -2208,17 +2208,11 @@ static int ocfs2_initialize_super(struct super_block *sb, } if (ocfs2_clusterinfo_valid(osb)) { - /* - * ci_stack and ci_cluster in ocfs2_cluster_info may not be null - * terminated, so make sure no overflow happens here by using - * memcpy. Destination strings will always be null terminated - * because osb is allocated using kzalloc. 
- */ osb->osb_stackflags = OCFS2_RAW_SB(di)->s_cluster_info.ci_stackflags; - memcpy(osb->osb_cluster_stack, + strlcpy(osb->osb_cluster_stack, OCFS2_RAW_SB(di)->s_cluster_info.ci_stack, - OCFS2_STACK_LABEL_LEN); + OCFS2_STACK_LABEL_LEN + 1); if (strlen(osb->osb_cluster_stack) != OCFS2_STACK_LABEL_LEN) { mlog(ML_ERROR, "couldn't mount because of an invalid " @@ -2227,9 +2221,9 @@ static int ocfs2_initialize_super(struct super_block *sb, status = -EINVAL; goto bail; } - memcpy(osb->osb_cluster_name, + strlcpy(osb->osb_cluster_name, OCFS2_RAW_SB(di)->s_cluster_info.ci_cluster, - OCFS2_CLUSTER_NAME_LEN); + OCFS2_CLUSTER_NAME_LEN + 1); } else { /* The empty string is identical with classic tools that * don't know about s_cluster_info. */ diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index bf2c8ae8ed73..953c88dd6519 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -831,13 +831,9 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, } } else { new_create = true; - if (!d_is_negative(newdentry)) { - if (!new_opaque || !ovl_is_whiteout(newdentry)) - goto out_dput; - } else { - if (flags & RENAME_EXCHANGE) - goto out_dput; - } + if (!d_is_negative(newdentry) && + (!new_opaque || !ovl_is_whiteout(newdentry))) + goto out_dput; } if (olddentry == trap) diff --git a/fs/pipe.c b/fs/pipe.c index 37a003b645ef..6534470a6c19 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -27,21 +27,6 @@ #include "internal.h" -/* - * New pipe buffers will be restricted to this size while the user is exceeding - * their pipe buffer quota. The general pipe use case needs at least two - * buffers: one for data yet to be read, and one for new data. If this is less - * than two, then a write to a non-empty pipe may block even if the pipe is not - * full. 
This can occur with GNU make jobserver or similar uses of pipes as - * semaphores: multiple processes may be waiting to write tokens back to the - * pipe before reading tokens: https://lore.kernel.org/lkml/1628086770.5rn8p04n6j.none@localhost/. - * - * Users can reduce their pipe buffers with F_SETPIPE_SZ below this at their - * own risk, namely: pipe writes to non-full pipes may block until the pipe is - * emptied. - */ -#define PIPE_MIN_DEF_BUFFERS 2 - /* * The max size that a non-root user is allowed to grow the pipe. Can * be set by root in /proc/sys/fs/pipe-max-size @@ -636,7 +621,7 @@ struct pipe_inode_info *alloc_pipe_info(void) if (!too_many_pipe_buffers_hard(user)) { if (too_many_pipe_buffers_soft(user)) - pipe_bufs = PIPE_MIN_DEF_BUFFERS; + pipe_bufs = 1; pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL); } diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 33008eea6bc5..993bb3b5f4d5 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -594,15 +594,12 @@ EXPORT_SYMBOL_GPL(posix_acl_create); /** * posix_acl_update_mode - update mode in set_acl - * @inode: target inode - * @mode_p: mode (pointer) for update - * @acl: acl pointer * * Update the file mode when setting an ACL: compute the new file permission * bits based on the ACL. In addition, if the ACL is equivalent to the new - * file mode, set *@acl to NULL to indicate that no ACL should be set. + * file mode, set *acl to NULL to indicate that no ACL should be set. * - * As with chmod, clear the setgid bit if the caller is not in the owning group + * As with chmod, clear the setgit bit if the caller is not in the owning group * or capable of CAP_FSETID (see inode_change_ok). * * Called from set_acl inode operations. 
diff --git a/fs/proc/array.c b/fs/proc/array.c index c4478abd1bef..d4b1c2361adf 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -169,45 +169,51 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, task_unlock(p); rcu_read_unlock(); - seq_printf(m, "State:\t%s", get_task_state(p)); - - seq_put_decimal_ull(m, "\nTgid:\t", tgid); - seq_put_decimal_ull(m, "\nPid:\t", pid_nr_ns(pid, ns)); - seq_put_decimal_ull(m, "\nPPid:\t", ppid); - seq_put_decimal_ull(m, "\nTracerPid:\t", tpid); - seq_put_decimal_ull(m, "\nUid:\t", from_kuid_munged(user_ns, cred->uid)); - seq_put_decimal_ull(m, "\t", from_kuid_munged(user_ns, cred->euid)); - seq_put_decimal_ull(m, "\t", from_kuid_munged(user_ns, cred->suid)); - seq_put_decimal_ull(m, "\t", from_kuid_munged(user_ns, cred->fsuid)); - seq_put_decimal_ull(m, "\nGid:\t", from_kgid_munged(user_ns, cred->gid)); - seq_put_decimal_ull(m, "\t", from_kgid_munged(user_ns, cred->egid)); - seq_put_decimal_ull(m, "\t", from_kgid_munged(user_ns, cred->sgid)); - seq_put_decimal_ull(m, "\t", from_kgid_munged(user_ns, cred->fsgid)); - seq_put_decimal_ull(m, "\nNgid:\t", ngid); - seq_put_decimal_ull(m, "\nFDSize:\t", max_fds); - - seq_puts(m, "\nGroups:\t"); + seq_printf(m, + "State:\t%s\n" + "Tgid:\t%d\n" + "Pid:\t%d\n" + "PPid:\t%d\n" + "TracerPid:\t%d\n" + "Uid:\t%d\t%d\t%d\t%d\n" + "Gid:\t%d\t%d\t%d\t%d\n" + "Ngid:\t%d\n" + "FDSize:\t%d\nGroups:\t", + get_task_state(p), + tgid, pid_nr_ns(pid, ns), ppid, tpid, + from_kuid_munged(user_ns, cred->uid), + from_kuid_munged(user_ns, cred->euid), + from_kuid_munged(user_ns, cred->suid), + from_kuid_munged(user_ns, cred->fsuid), + from_kgid_munged(user_ns, cred->gid), + from_kgid_munged(user_ns, cred->egid), + from_kgid_munged(user_ns, cred->sgid), + from_kgid_munged(user_ns, cred->fsgid), + ngid, max_fds); + group_info = cred->group_info; for (g = 0; g < group_info->ngroups; g++) - seq_put_decimal_ull(m, g ? 
" " : "", - from_kgid_munged(user_ns, GROUP_AT(group_info, g))); + seq_printf(m, "%d ", + from_kgid_munged(user_ns, GROUP_AT(group_info, g))); put_cred(cred); - /* Trailing space shouldn't have been added in the first place. */ - seq_putc(m, ' '); #ifdef CONFIG_PID_NS seq_puts(m, "\nNStgid:"); for (g = ns->level; g <= pid->level; g++) - seq_put_decimal_ull(m, "\t", task_tgid_nr_ns(p, pid->numbers[g].ns)); + seq_printf(m, "\t%d", + task_tgid_nr_ns(p, pid->numbers[g].ns)); seq_puts(m, "\nNSpid:"); for (g = ns->level; g <= pid->level; g++) - seq_put_decimal_ull(m, "\t", task_pid_nr_ns(p, pid->numbers[g].ns)); + seq_printf(m, "\t%d", + task_pid_nr_ns(p, pid->numbers[g].ns)); seq_puts(m, "\nNSpgid:"); for (g = ns->level; g <= pid->level; g++) - seq_put_decimal_ull(m, "\t", task_pgrp_nr_ns(p, pid->numbers[g].ns)); + seq_printf(m, "\t%d", + task_pgrp_nr_ns(p, pid->numbers[g].ns)); seq_puts(m, "\nNSsid:"); for (g = ns->level; g <= pid->level; g++) - seq_put_decimal_ull(m, "\t", task_session_nr_ns(p, pid->numbers[g].ns)); + seq_printf(m, "\t%d", + task_session_nr_ns(p, pid->numbers[g].ns)); #endif seq_putc(m, '\n'); } @@ -276,12 +282,11 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p) unlock_task_sighand(p, &flags); } - seq_put_decimal_ull(m, "Threads:\t", num_threads); - seq_put_decimal_ull(m, "\nSigQ:\t", qsize); - seq_put_decimal_ull(m, "/", qlim); + seq_printf(m, "Threads:\t%d\n", num_threads); + seq_printf(m, "SigQ:\t%lu/%lu\n", qsize, qlim); /* render them all */ - render_sigset_t(m, "\nSigPnd:\t", &pending); + render_sigset_t(m, "SigPnd:\t", &pending); render_sigset_t(m, "ShdPnd:\t", &shpending); render_sigset_t(m, "SigBlk:\t", &blocked); render_sigset_t(m, "SigIgn:\t", &ignored); @@ -326,8 +331,7 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p) static inline void task_seccomp(struct seq_file *m, struct task_struct *p) { #ifdef CONFIG_SECCOMP - seq_put_decimal_ull(m, "Seccomp:\t", p->seccomp.mode); - seq_putc(m, 
'\n'); + seq_printf(m, "Seccomp:\t%d\n", p->seccomp.mode); #endif seq_printf(m, "Speculation_Store_Bypass:\t"); switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) { @@ -359,9 +363,10 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p) static inline void task_context_switch_counts(struct seq_file *m, struct task_struct *p) { - seq_put_decimal_ull(m, "voluntary_ctxt_switches:\t", p->nvcsw); - seq_put_decimal_ull(m, "\nnonvoluntary_ctxt_switches:\t", p->nivcsw); - seq_putc(m, '\n'); + seq_printf(m, "voluntary_ctxt_switches:\t%lu\n" + "nonvoluntary_ctxt_switches:\t%lu\n", + p->nvcsw, + p->nivcsw); } static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) @@ -505,41 +510,41 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, start_time = nsec_to_clock_t(task->real_start_time); seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state); - seq_put_decimal_ll(m, " ", ppid); - seq_put_decimal_ll(m, " ", pgid); - seq_put_decimal_ll(m, " ", sid); - seq_put_decimal_ll(m, " ", tty_nr); - seq_put_decimal_ll(m, " ", tty_pgrp); - seq_put_decimal_ull(m, " ", task->flags); - seq_put_decimal_ull(m, " ", min_flt); - seq_put_decimal_ull(m, " ", cmin_flt); - seq_put_decimal_ull(m, " ", maj_flt); - seq_put_decimal_ull(m, " ", cmaj_flt); - seq_put_decimal_ull(m, " ", cputime_to_clock_t(utime)); - seq_put_decimal_ull(m, " ", cputime_to_clock_t(stime)); - seq_put_decimal_ll(m, " ", cputime_to_clock_t(cutime)); - seq_put_decimal_ll(m, " ", cputime_to_clock_t(cstime)); - seq_put_decimal_ll(m, " ", priority); - seq_put_decimal_ll(m, " ", nice); - seq_put_decimal_ll(m, " ", num_threads); - seq_put_decimal_ull(m, " ", 0); - seq_put_decimal_ull(m, " ", start_time); - seq_put_decimal_ull(m, " ", vsize); - seq_put_decimal_ull(m, " ", mm ? get_mm_rss(mm) : 0); - seq_put_decimal_ull(m, " ", rsslim); - seq_put_decimal_ull(m, " ", mm ? (permitted ? mm->start_code : 1) : 0); - seq_put_decimal_ull(m, " ", mm ? (permitted ? 
mm->end_code : 1) : 0); - seq_put_decimal_ull(m, " ", (permitted && mm) ? mm->start_stack : 0); - seq_put_decimal_ull(m, " ", esp); - seq_put_decimal_ull(m, " ", eip); + seq_put_decimal_ll(m, ' ', ppid); + seq_put_decimal_ll(m, ' ', pgid); + seq_put_decimal_ll(m, ' ', sid); + seq_put_decimal_ll(m, ' ', tty_nr); + seq_put_decimal_ll(m, ' ', tty_pgrp); + seq_put_decimal_ull(m, ' ', task->flags); + seq_put_decimal_ull(m, ' ', min_flt); + seq_put_decimal_ull(m, ' ', cmin_flt); + seq_put_decimal_ull(m, ' ', maj_flt); + seq_put_decimal_ull(m, ' ', cmaj_flt); + seq_put_decimal_ull(m, ' ', cputime_to_clock_t(utime)); + seq_put_decimal_ull(m, ' ', cputime_to_clock_t(stime)); + seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cutime)); + seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cstime)); + seq_put_decimal_ll(m, ' ', priority); + seq_put_decimal_ll(m, ' ', nice); + seq_put_decimal_ll(m, ' ', num_threads); + seq_put_decimal_ull(m, ' ', 0); + seq_put_decimal_ull(m, ' ', start_time); + seq_put_decimal_ull(m, ' ', vsize); + seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0); + seq_put_decimal_ull(m, ' ', rsslim); + seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0); + seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0); + seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0); + seq_put_decimal_ull(m, ' ', esp); + seq_put_decimal_ull(m, ' ', eip); /* The signal information here is obsolete. * It must be decimal for Linux 2.0 compatibility. * Use /proc/#/status for real-time signals. 
*/ - seq_put_decimal_ull(m, " ", task->pending.signal.sig[0] & 0x7fffffffUL); - seq_put_decimal_ull(m, " ", task->blocked.sig[0] & 0x7fffffffUL); - seq_put_decimal_ull(m, " ", sigign.sig[0] & 0x7fffffffUL); - seq_put_decimal_ull(m, " ", sigcatch.sig[0] & 0x7fffffffUL); + seq_put_decimal_ull(m, ' ', task->pending.signal.sig[0] & 0x7fffffffUL); + seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL); + seq_put_decimal_ull(m, ' ', sigign.sig[0] & 0x7fffffffUL); + seq_put_decimal_ull(m, ' ', sigcatch.sig[0] & 0x7fffffffUL); /* * We used to output the absolute kernel address, but that's an @@ -553,31 +558,31 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, else seq_puts(m, " 0"); - seq_put_decimal_ull(m, " ", 0); - seq_put_decimal_ull(m, " ", 0); - seq_put_decimal_ll(m, " ", task->exit_signal); - seq_put_decimal_ll(m, " ", task_cpu(task)); - seq_put_decimal_ull(m, " ", task->rt_priority); - seq_put_decimal_ull(m, " ", task->policy); - seq_put_decimal_ull(m, " ", delayacct_blkio_ticks(task)); - seq_put_decimal_ull(m, " ", cputime_to_clock_t(gtime)); - seq_put_decimal_ll(m, " ", cputime_to_clock_t(cgtime)); + seq_put_decimal_ull(m, ' ', 0); + seq_put_decimal_ull(m, ' ', 0); + seq_put_decimal_ll(m, ' ', task->exit_signal); + seq_put_decimal_ll(m, ' ', task_cpu(task)); + seq_put_decimal_ull(m, ' ', task->rt_priority); + seq_put_decimal_ull(m, ' ', task->policy); + seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task)); + seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime)); + seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime)); if (mm && permitted) { - seq_put_decimal_ull(m, " ", mm->start_data); - seq_put_decimal_ull(m, " ", mm->end_data); - seq_put_decimal_ull(m, " ", mm->start_brk); - seq_put_decimal_ull(m, " ", mm->arg_start); - seq_put_decimal_ull(m, " ", mm->arg_end); - seq_put_decimal_ull(m, " ", mm->env_start); - seq_put_decimal_ull(m, " ", mm->env_end); + seq_put_decimal_ull(m, ' ', mm->start_data); + seq_put_decimal_ull(m, 
' ', mm->end_data); + seq_put_decimal_ull(m, ' ', mm->start_brk); + seq_put_decimal_ull(m, ' ', mm->arg_start); + seq_put_decimal_ull(m, ' ', mm->arg_end); + seq_put_decimal_ull(m, ' ', mm->env_start); + seq_put_decimal_ull(m, ' ', mm->env_end); } else - seq_puts(m, " 0 0 0 0 0 0 0"); + seq_printf(m, " 0 0 0 0 0 0 0"); if (permitted) - seq_put_decimal_ll(m, " ", task->exit_code); + seq_put_decimal_ll(m, ' ', task->exit_code); else - seq_puts(m, " 0"); + seq_put_decimal_ll(m, ' ', 0); seq_putc(m, '\n'); if (mm) @@ -613,13 +618,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, * seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n", * size, resident, shared, text, data); */ - seq_put_decimal_ull(m, "", size); - seq_put_decimal_ull(m, " ", resident); - seq_put_decimal_ull(m, " ", shared); - seq_put_decimal_ull(m, " ", text); - seq_put_decimal_ull(m, " ", 0); - seq_put_decimal_ull(m, " ", data); - seq_put_decimal_ull(m, " ", 0); + seq_put_decimal_ull(m, 0, size); + seq_put_decimal_ull(m, ' ', resident); + seq_put_decimal_ull(m, ' ', shared); + seq_put_decimal_ull(m, ' ', text); + seq_put_decimal_ull(m, ' ', 0); + seq_put_decimal_ull(m, ' ', data); + seq_put_decimal_ull(m, ' ', 0); seq_putc(m, '\n'); return 0; diff --git a/fs/proc/base.c b/fs/proc/base.c index 630aa8cbc409..6ff8dd57cc97 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -894,7 +894,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf, flags |= FOLL_WRITE; while (count > 0) { - size_t this_len = min_t(size_t, count, PAGE_SIZE); + int this_len = min_t(int, count, PAGE_SIZE); if (write && copy_from_user(page, buf, this_len)) { copied = -EFAULT; @@ -1469,6 +1469,204 @@ static const struct file_operations proc_pid_sched_operations = { #endif +/* + * Print out various scheduling related per-task fields: + */ + +#ifdef CONFIG_SMP + +static int sched_wake_up_idle_show(struct seq_file *m, void *v) +{ + struct inode *inode = m->private; + struct task_struct *p; + + p = get_proc_task(inode); + 
if (!p) + return -ESRCH; + + seq_printf(m, "%d\n", sched_get_wake_up_idle(p)); + + put_task_struct(p); + + return 0; +} + +static ssize_t +sched_wake_up_idle_write(struct file *file, const char __user *buf, + size_t count, loff_t *offset) +{ + struct inode *inode = file_inode(file); + struct task_struct *p; + char buffer[PROC_NUMBUF]; + int wake_up_idle, err; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + err = -EFAULT; + goto out; + } + + err = kstrtoint(strstrip(buffer), 0, &wake_up_idle); + if (err) + goto out; + + p = get_proc_task(inode); + if (!p) + return -ESRCH; + + err = sched_set_wake_up_idle(p, wake_up_idle); + + put_task_struct(p); + +out: + return err < 0 ? err : count; +} + +static int sched_wake_up_idle_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, sched_wake_up_idle_show, inode); +} + +static const struct file_operations proc_pid_sched_wake_up_idle_operations = { + .open = sched_wake_up_idle_open, + .read = seq_read, + .write = sched_wake_up_idle_write, + .llseek = seq_lseek, + .release = single_release, +}; + +#endif /* CONFIG_SMP */ + +#ifdef CONFIG_SCHED_HMP + +static int sched_init_task_load_show(struct seq_file *m, void *v) +{ + struct inode *inode = m->private; + struct task_struct *p; + + p = get_proc_task(inode); + if (!p) + return -ESRCH; + + seq_printf(m, "%d\n", sched_get_init_task_load(p)); + + put_task_struct(p); + + return 0; +} + +static ssize_t +sched_init_task_load_write(struct file *file, const char __user *buf, + size_t count, loff_t *offset) +{ + struct inode *inode = file_inode(file); + struct task_struct *p; + char buffer[PROC_NUMBUF]; + int init_task_load, err; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + err = -EFAULT; + goto out; + } + + err = kstrtoint(strstrip(buffer), 0, &init_task_load); + 
if (err) + goto out; + + p = get_proc_task(inode); + if (!p) + return -ESRCH; + + err = sched_set_init_task_load(p, init_task_load); + + put_task_struct(p); + +out: + return err < 0 ? err : count; +} + +static int sched_init_task_load_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, sched_init_task_load_show, inode); +} + +static const struct file_operations proc_pid_sched_init_task_load_operations = { + .open = sched_init_task_load_open, + .read = seq_read, + .write = sched_init_task_load_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static int sched_group_id_show(struct seq_file *m, void *v) +{ + struct inode *inode = m->private; + struct task_struct *p; + + p = get_proc_task(inode); + if (!p) + return -ESRCH; + + seq_printf(m, "%d\n", sched_get_group_id(p)); + + put_task_struct(p); + + return 0; +} + +static ssize_t +sched_group_id_write(struct file *file, const char __user *buf, + size_t count, loff_t *offset) +{ + struct inode *inode = file_inode(file); + struct task_struct *p; + char buffer[PROC_NUMBUF]; + int group_id, err; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + err = -EFAULT; + goto out; + } + + err = kstrtoint(strstrip(buffer), 0, &group_id); + if (err) + goto out; + + p = get_proc_task(inode); + if (!p) + return -ESRCH; + + err = sched_set_group_id(p, group_id); + + put_task_struct(p); + +out: + return err < 0 ? 
err : count; +} + +static int sched_group_id_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, sched_group_id_show, inode); +} + +static const struct file_operations proc_pid_sched_group_id_operations = { + .open = sched_group_id_open, + .read = seq_read, + .write = sched_group_id_write, + .llseek = seq_lseek, + .release = single_release, +}; + +#endif /* CONFIG_SCHED_HMP */ + #ifdef CONFIG_SCHED_AUTOGROUP /* * Print out autogroup related information: @@ -2501,13 +2699,6 @@ out: } #ifdef CONFIG_SECURITY -static int proc_pid_attr_open(struct inode *inode, struct file *file) -{ - file->private_data = NULL; - __mem_open(inode, file, PTRACE_MODE_READ_FSCREDS); - return 0; -} - static ssize_t proc_pid_attr_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { @@ -2537,10 +2728,6 @@ static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf, ssize_t length; struct task_struct *task = get_proc_task(inode); - /* A task may only write when it was the opener. 
*/ - if (file->private_data != current->mm) - return -EPERM; - length = -ESRCH; if (!task) goto out_no_task; @@ -2579,11 +2766,9 @@ out_no_task: } static const struct file_operations proc_pid_attr_operations = { - .open = proc_pid_attr_open, .read = proc_pid_attr_read, .write = proc_pid_attr_write, .llseek = generic_file_llseek, - .release = mem_release, }; static const struct pid_entry attr_dir_stuff[] = { @@ -2922,6 +3107,13 @@ static const struct pid_entry tgid_base_stuff[] = { ONE("status", S_IRUGO, proc_pid_status), ONE("personality", S_IRUSR, proc_pid_personality), ONE("limits", S_IRUGO, proc_pid_limits), +#ifdef CONFIG_SMP + REG("sched_wake_up_idle", S_IRUGO|S_IWUSR, proc_pid_sched_wake_up_idle_operations), +#endif +#ifdef CONFIG_SCHED_HMP + REG("sched_init_task_load", S_IRUGO|S_IWUSR, proc_pid_sched_init_task_load_operations), + REG("sched_group_id", S_IRUGO|S_IWUGO, proc_pid_sched_group_id_operations), +#endif #ifdef CONFIG_SCHED_DEBUG REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), #endif diff --git a/fs/proc/cpu_time_stat.c b/fs/proc/cpu_time_stat.c index be8dafd9bbf7..d0ca09b4f70a 100644 --- a/fs/proc/cpu_time_stat.c +++ b/fs/proc/cpu_time_stat.c @@ -118,16 +118,16 @@ static int show_cpu_time_stat(struct seq_file *p, void *v) sum += arch_irq_stat(); seq_puts(p, "cpu "); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(user)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); + 
seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice)); seq_putc(p, '\n'); for_each_possible_cpu(i) { @@ -143,16 +143,16 @@ static int show_cpu_time_stat(struct seq_file *p, void *v) guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; seq_printf(p, "cpu%d", i); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(user)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest)); + seq_put_decimal_ull(p, ' ', 
cputime64_to_clock_t(guest_nice)); seq_putc(p, '\n'); } return 0; diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 107db4f559e0..21f198aa0961 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -507,15 +507,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) return -EFAULT; } else { if (kern_addr_valid(start)) { + unsigned long n; + /* * Using bounce buffer to bypass the * hardened user copy kernel text checks. */ - if (probe_kernel_read(buf, (void *) start, tsz)) { - if (clear_user(buffer, tsz)) - return -EFAULT; - } else { - if (copy_to_user(buffer, buf, tsz)) + memcpy(buf, (char *) start, tsz); + n = copy_to_user(buffer, buf, tsz); + /* + * We cannot distinguish between fault on source + * and fault on destination. When this happens + * we clear too and hope it will trigger the + * EFAULT again. + */ + if (n) { + if (clear_user(buffer + tsz - n, + n)) return -EFAULT; } } else { diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 0fd3f81fe51f..510413eb25b8 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -117,16 +117,17 @@ static int show_stat(struct seq_file *p, void *v) } sum += arch_irq_stat(); - seq_put_decimal_ull(p, "cpu ", cputime64_to_clock_t(user)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice)); + seq_puts(p, "cpu "); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system)); + seq_put_decimal_ull(p, ' ', 
cputime64_to_clock_t(idle)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice)); seq_putc(p, '\n'); for_each_online_cpu(i) { @@ -142,23 +143,23 @@ static int show_stat(struct seq_file *p, void *v) guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; seq_printf(p, "cpu%d", i); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(user)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest)); - seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest)); + seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice)); seq_putc(p, '\n'); } - seq_put_decimal_ull(p, "intr ", (unsigned long long)sum); + seq_printf(p, "intr %llu", (unsigned long long)sum); /* sum 
again ? it could be updated? */ for_each_irq_nr(j) - seq_put_decimal_ull(p, " ", kstat_irqs_usr(j)); + seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j)); seq_printf(p, "\nctxt %llu\n" @@ -172,10 +173,10 @@ static int show_stat(struct seq_file *p, void *v) nr_running(), nr_iowait()); - seq_put_decimal_ull(p, "softirq ", (unsigned long long)sum_softirq); + seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq); for (i = 0; i < NR_SOFTIRQS; i++) - seq_put_decimal_ull(p, " ", per_softirq_sums[i]); + seq_put_decimal_ull(p, ' ', per_softirq_sums[i]); seq_putc(p, '\n'); return 0; diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 785d05e3358c..08143139b65a 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -105,19 +105,14 @@ static ssize_t read_from_oldmem(char *buf, size_t count, nr_bytes = count; /* If pfn is not ram, return zeros for sparse dump files */ - if (pfn_is_ram(pfn) == 0) { - tmp = 0; - if (!userbuf) - memset(buf, 0, nr_bytes); - else if (clear_user(buf, nr_bytes)) - tmp = -EFAULT; - } else { + if (pfn_is_ram(pfn) == 0) + memset(buf, 0, nr_bytes); + else { tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf); + if (tmp < 0) + return tmp; } - if (tmp < 0) - return tmp; - *ppos += nr_bytes; count -= nr_bytes; buf += nr_bytes; diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c index 613cc38c9efa..b218f965817b 100644 --- a/fs/qnx4/dir.c +++ b/fs/qnx4/dir.c @@ -14,48 +14,13 @@ #include #include "qnx4.h" -/* - * A qnx4 directory entry is an inode entry or link info - * depending on the status field in the last byte. The - * first byte is where the name start either way, and a - * zero means it's empty. - * - * Also, due to a bug in gcc, we don't want to use the - * real (differently sized) name arrays in the inode and - * link entries, but always the 'de_name[]' one in the - * fake struct entry. 
- * - * See - * - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99578#c6 - * - * for details, but basically gcc will take the size of the - * 'name' array from one of the used union entries randomly. - * - * This use of 'de_name[]' (48 bytes) avoids the false positive - * warnings that would happen if gcc decides to use 'inode.di_name' - * (16 bytes) even when the pointer and size were to come from - * 'link.dl_name' (48 bytes). - * - * In all cases the actual name pointer itself is the same, it's - * only the gcc internal 'what is the size of this field' logic - * that can get confused. - */ -union qnx4_directory_entry { - struct { - const char de_name[48]; - u8 de_pad[15]; - u8 de_status; - }; - struct qnx4_inode_entry inode; - struct qnx4_link_info link; -}; - static int qnx4_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); unsigned int offset; struct buffer_head *bh; + struct qnx4_inode_entry *de; + struct qnx4_link_info *le; unsigned long blknum; int ix, ino; int size; @@ -72,27 +37,27 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx) } ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK; for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) { - union qnx4_directory_entry *de; - offset = ix * QNX4_DIR_ENTRY_SIZE; - de = (union qnx4_directory_entry *) (bh->b_data + offset); - - if (!de->de_name[0]) + de = (struct qnx4_inode_entry *) (bh->b_data + offset); + if (!de->di_fname[0]) continue; - if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK))) + if (!(de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK))) continue; - if (!(de->de_status & QNX4_FILE_LINK)) { - size = sizeof(de->inode.di_fname); + if (!(de->di_status & QNX4_FILE_LINK)) + size = QNX4_SHORT_NAME_MAX; + else + size = QNX4_NAME_MAX; + size = strnlen(de->di_fname, size); + QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname)); + if (!(de->di_status & QNX4_FILE_LINK)) ino = blknum * 
QNX4_INODES_PER_BLOCK + ix - 1; - } else { - size = sizeof(de->link.dl_fname); - ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) * + else { + le = (struct qnx4_link_info*)de; + ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) * QNX4_INODES_PER_BLOCK + - de->link.dl_inode_ndx; + le->dl_inode_ndx; } - size = strnlen(de->de_name, size); - QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, name)); - if (!dir_emit(ctx, de->de_name, size, ino, DT_UNKNOWN)) { + if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) { brelse(bh); return 0; } diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 1bb72c4f3187..b7d5e254792c 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -669,14 +669,9 @@ int dquot_quota_sync(struct super_block *sb, int type) /* This is not very clever (and fast) but currently I don't know about * any other simple way of getting quota data to disk and we must get * them there for userspace to be visible... */ - if (sb->s_op->sync_fs) { - ret = sb->s_op->sync_fs(sb, 1); - if (ret) - return ret; - } - ret = sync_blockdev(sb->s_bdev); - if (ret) - return ret; + if (sb->s_op->sync_fs) + sb->s_op->sync_fs(sb, 1); + sync_blockdev(sb->s_bdev); /* * Now when everything is written we can discard the pagecache so diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index 4f21724a212b..58efb83dec1c 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -55,7 +55,7 @@ static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) memset(buf, 0, info->dqi_usable_bs); return sb->s_op->quota_read(sb, info->dqi_type, buf, - info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits); + info->dqi_usable_bs, blk << info->dqi_blocksize_bits); } static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) @@ -64,7 +64,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf) ssize_t ret; ret = sb->s_op->quota_write(sb, info->dqi_type, buf, - info->dqi_usable_bs, (loff_t)blk << 
info->dqi_blocksize_bits); + info->dqi_usable_bs, blk << info->dqi_blocksize_bits); if (ret != info->dqi_usable_bs) { quota_error(sb, "dquota write failed"); if (ret >= 0) @@ -277,7 +277,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info, blk); goto out_buf; } - dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) + + dquot->dq_off = (blk << info->dqi_blocksize_bits) + sizeof(struct qt_disk_dqdbheader) + i * info->dqi_entry_size; kfree(buf); @@ -416,7 +416,6 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, quota_error(dquot->dq_sb, "Quota structure has offset to " "other block (%u) than it should (%u)", blk, (uint)(dquot->dq_off >> info->dqi_blocksize_bits)); - ret = -EIO; goto out_buf; } ret = read_blk(info, blk, buf); @@ -482,13 +481,6 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, goto out_buf; } newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); - if (newblk < QT_TREEOFF || newblk >= info->dqi_blocks) { - quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)", - newblk, info->dqi_blocks); - ret = -EUCLEAN; - goto out_buf; - } - if (depth == info->dqi_qtree_depth - 1) { ret = free_dqentry(info, dquot, newblk); newblk = 0; @@ -560,7 +552,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, ret = -EIO; goto out_buf; } else { - ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct + ret = (blk << info->dqi_blocksize_bits) + sizeof(struct qt_disk_dqdbheader) + i * info->dqi_entry_size; } out_buf: @@ -588,13 +580,6 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); if (!blk) /* No reference? 
*/ goto out_buf; - if (blk < QT_TREEOFF || blk >= info->dqi_blocks) { - quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)", - blk, info->dqi_blocks); - ret = -EUCLEAN; - goto out_buf; - } - if (depth < info->dqi_qtree_depth - 1) ret = find_tree_dqentry(info, dquot, blk, depth+1); else diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 6a0fa0cdc1ed..00985f9db9f7 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -2770,20 +2770,6 @@ int journal_init(struct super_block *sb, const char *j_dev_name, goto free_and_return; } - /* - * Sanity check to see if journal first block is correct. - * If journal first block is invalid it can cause - * zeroing important superblock members. - */ - if (!SB_ONDISK_JOURNAL_DEVICE(sb) && - SB_ONDISK_JOURNAL_1st_BLOCK(sb) < SB_JOURNAL_1st_RESERVED_BLOCK(sb)) { - reiserfs_warning(sb, "journal-1393", - "journal 1st super block is invalid: 1st reserved block %d, but actual 1st block is %d", - SB_JOURNAL_1st_RESERVED_BLOCK(sb), - SB_ONDISK_JOURNAL_1st_BLOCK(sb)); - goto free_and_return; - } - if (journal_init_dev(sb, journal, j_dev_name) != 0) { reiserfs_warning(sb, "sh-462", "unable to initialize journal device"); diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 13322c39e6cc..e3a4cbad9620 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -386,24 +386,6 @@ void pathrelse(struct treepath *search_path) search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET; } -static int has_valid_deh_location(struct buffer_head *bh, struct item_head *ih) -{ - struct reiserfs_de_head *deh; - int i; - - deh = B_I_DEH(bh, ih); - for (i = 0; i < ih_entry_count(ih); i++) { - if (deh_location(&deh[i]) > ih_item_len(ih)) { - reiserfs_warning(NULL, "reiserfs-5094", - "directory entry location seems wrong %h", - &deh[i]); - return 0; - } - } - - return 1; -} - static int is_leaf(char *buf, int blocksize, struct buffer_head *bh) { struct block_head *blkh; @@ -471,15 +453,6 @@ static int is_leaf(char *buf, 
int blocksize, struct buffer_head *bh) "(second one): %h", ih); return 0; } - if (is_direntry_le_ih(ih)) { - if (ih_item_len(ih) < (ih_entry_count(ih) * IH_SIZE)) { - reiserfs_warning(NULL, "reiserfs-5093", - "item entry count seems wrong %h", - ih); - return 0; - } - return has_valid_deh_location(bh, ih); - } prev_location = ih_location(ih); } diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 2ffcbe451202..503d8c06e0d9 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -2050,14 +2050,6 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) unlock_new_inode(root_inode); } - if (!S_ISDIR(root_inode->i_mode) || !inode_get_bytes(root_inode) || - !root_inode->i_size) { - SWARN(silent, s, "", "corrupt root inode, run fsck"); - iput(root_inode); - errval = -EUCLEAN; - goto error; - } - s->s_root = d_make_root(root_inode); if (!s->s_root) goto error; diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h index 19ca3745301f..613ff5aef94e 100644 --- a/fs/reiserfs/xattr.h +++ b/fs/reiserfs/xattr.h @@ -42,7 +42,7 @@ void reiserfs_security_free(struct reiserfs_security_handle *sec); static inline int reiserfs_xattrs_initialized(struct super_block *sb) { - return REISERFS_SB(sb)->priv_root && REISERFS_SB(sb)->xattr_root; + return REISERFS_SB(sb)->priv_root != NULL; } #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header)) diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c index eb279de02ffb..7beacf41ab17 100644 --- a/fs/sdcardfs/dentry.c +++ b/fs/sdcardfs/dentry.c @@ -17,6 +17,11 @@ * under the terms of the Apache 2.0 License OR version 2 of the GNU * General Public License. */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2018 Sony Mobile Communications Inc, + * and licensed under the license of the file. 
+ */ #include "sdcardfs.h" #include "linux/ctype.h" @@ -126,7 +131,10 @@ out: /* 1 = delete, 0 = cache */ static int sdcardfs_d_delete(const struct dentry *d) { - return SDCARDFS_SB(d->d_sb)->options.nocache ? 1 : 0; + if (SDCARDFS_SB(d->d_sb)->options.nocache) + return d->d_inode && !S_ISDIR(d->d_inode->i_mode); + + return 0; } static void sdcardfs_d_release(struct dentry *dentry) diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c index 271c4c4cb760..79028bbda86d 100644 --- a/fs/sdcardfs/file.c +++ b/fs/sdcardfs/file.c @@ -17,6 +17,11 @@ * under the terms of the Apache 2.0 License OR version 2 of the GNU * General Public License. */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2017 Sony Mobile Communications Inc, + * and licensed under the license of the file. + */ #include "sdcardfs.h" #ifdef CONFIG_SDCARD_FS_FADV_NOACTIVE @@ -255,6 +260,7 @@ static int sdcardfs_open(struct inode *inode, struct file *file) goto out_err; } + file->f_mode |= FMODE_NONMAPPABLE; file->private_data = kzalloc(sizeof(struct sdcardfs_file_info), GFP_KERNEL); if (!SDCARDFS_F(file)) { @@ -432,6 +438,11 @@ out: return err; } +static struct file *sdcardfs_get_lower_file(struct file *f) +{ + return sdcardfs_lower_file(f); +} + const struct file_operations sdcardfs_main_fops = { .llseek = generic_file_llseek, .read = sdcardfs_read, @@ -448,6 +459,7 @@ const struct file_operations sdcardfs_main_fops = { .fasync = sdcardfs_fasync, .read_iter = sdcardfs_read_iter, .write_iter = sdcardfs_write_iter, + .get_lower_file = sdcardfs_get_lower_file, }; /* trimmed directory options */ @@ -464,4 +476,5 @@ const struct file_operations sdcardfs_dir_fops = { .flush = sdcardfs_flush, .fsync = sdcardfs_fsync, .fasync = sdcardfs_fasync, + .get_lower_file = sdcardfs_get_lower_file, }; diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c index ab0952f13510..1649af3ea7be 100644 --- a/fs/sdcardfs/inode.c +++ b/fs/sdcardfs/inode.c @@ -17,6 
+17,11 @@ * under the terms of the Apache 2.0 License OR version 2 of the GNU * General Public License. */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2016 Sony Mobile Communications Inc, + * and licensed under the license of the file. + */ #include "sdcardfs.h" #include @@ -101,7 +106,7 @@ static int sdcardfs_create(struct inode *dir, struct dentry *dentry, current->fs = copied_fs; task_unlock(current); - err = vfs_create2(lower_dentry_mnt, d_inode(lower_parent_dentry), lower_dentry, mode, want_excl); + err = vfs_create2(lower_dentry_mnt, lower_parent_dentry->d_inode, lower_dentry, mode, want_excl); if (err) goto out; @@ -259,7 +264,7 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode current->fs = copied_fs; task_unlock(current); - err = vfs_mkdir2(lower_mnt, d_inode(lower_parent_dentry), lower_dentry, mode); + err = vfs_mkdir2(lower_mnt, lower_parent_dentry->d_inode, lower_dentry, mode); if (err) { unlock_dir(lower_parent_dentry); @@ -770,7 +775,11 @@ static int sdcardfs_getattr(struct vfsmount *mnt, struct dentry *dentry, goto out; sdcardfs_copy_and_fix_attrs(d_inode(dentry), d_inode(lower_path.dentry)); + fsstack_copy_inode_size(d_inode(dentry), + d_inode(lower_path.dentry)); err = sdcardfs_fillattr(mnt, d_inode(dentry), &lower_stat, stat); + fsstack_copy_inode_size(d_inode(dentry), + d_inode(lower_path.dentry)); out: sdcardfs_put_lower_path(dentry, &lower_path); return err; diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c index a671ae2338ea..262ea543d5b3 100644 --- a/fs/sdcardfs/lookup.c +++ b/fs/sdcardfs/lookup.c @@ -17,9 +17,15 @@ * under the terms of the Apache 2.0 License OR version 2 of the GNU * General Public License. */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2017 Sony Mobile Communications Inc, + * and licensed under the license of the file. 
+ */ #include "sdcardfs.h" #include "linux/delay.h" +#include /* The dentry cache is just so we have properly sized dentries */ static struct kmem_cache *sdcardfs_dentry_cachep; @@ -112,6 +118,10 @@ struct inode *sdcardfs_iget(struct super_block *sb, struct inode *lower_inode, u /* if found a cached inode, then just return it (after iput) */ if (!(inode->i_state & I_NEW)) { iput(lower_inode); + /* There can only be one alias, as we don't permit hard links + * This ensures we do not keep stale dentries that would later + * cause confusion. */ + d_prune_aliases(inode); return inode; } @@ -242,6 +252,89 @@ static int sdcardfs_name_match(struct dir_context *ctx, const char *name, return 0; } +/* The dir context used by sdcardfs_lower_filldir() */ +struct sdcardfs_lower_getent_cb { + struct dir_context ctx; + loff_t pos; + const char *target; /* search target */ + int target_len; + char alias[NAME_MAX+1]; /* alias name found in lower dir */ + int alias_len; + int result; /* 0: found, -ENOENT: not found. */ +}; + +/* The filldir used by case insensitive search in sdcardfs_ci_path_lookup() */ +static int +sdcardfs_lower_filldir(struct dir_context *ctx, const char *name, int namelen, + loff_t offset, u64 ino, unsigned int d_type) +{ + struct sdcardfs_lower_getent_cb *buf; + + buf = container_of(ctx, struct sdcardfs_lower_getent_cb, ctx); + + if (!buf->result) /* entry already found, skip search */ + return 0; + + buf->pos = buf->ctx.pos; + if (!strncasecmp(name, buf->target, namelen) && + namelen == buf->target_len) { + strlcpy(buf->alias, name, namelen + 1); + buf->alias_len = namelen; + buf->result = 0; /* 0: found matching entry */ + } + return 0; +} + +/* + * Case insentively lookup lower directory. + * + * @folder: path to the lower folder. + * @name: lookup name. + * @entry: path to the found entry. 
+ * + * Returns: 0 (ok), -ENOENT (entry not found) + */ +static int sdcardfs_ci_path_lookup(struct path *folder, const char *name, + struct path *entry) +{ + int ret = 0; + struct file *filp; + loff_t last_pos; + struct sdcardfs_lower_getent_cb buf = { + .ctx.actor = sdcardfs_lower_filldir, + .ctx.pos = 0, + .pos = 0, + .target = name, + .alias_len = 0, + .result = -ENOENT + }; + + + buf.target_len = strlen(name); + + filp = dentry_open(folder, O_RDONLY | O_DIRECTORY, current_cred()); + + if (IS_ERR_OR_NULL(filp)) + return -ENOENT; + + while (ret >= 0) { + last_pos = filp->f_pos; + ret = iterate_dir(filp, &buf.ctx); + /* reaches end or found matching entry */ + if (last_pos == filp->f_pos || !buf.result) + break; + } + + filp_close(filp, NULL); + + if (!buf.result) + return vfs_path_lookup(folder->dentry, folder->mnt, buf.alias, + 0, entry); + else + return buf.result; + +} + /* * Main driver function for sdcardfs's lookup. * @@ -314,6 +407,36 @@ put_name: __putname(buffer.name); } + /* If the dentry was not found, and the intent is not rename file, + * try case insensitive search in lower parent directory. 
+ */ + if ((err == -ENOENT) && !(flags & LOOKUP_RENAME_TARGET)) + err = sdcardfs_ci_path_lookup(lower_parent_path, name->name, &lower_path); +#if 0 + /* check for other cases */ + if (err == -ENOENT) { + struct dentry *child; + struct dentry *match = NULL; + spin_lock(&lower_dir_dentry->d_lock); + list_for_each_entry(child, &lower_dir_dentry->d_subdirs, d_child) { + if (child && child->d_inode) { + if (strcasecmp(child->d_name.name, name)==0) { + match = dget(child); + break; + } + } + } + spin_unlock(&lower_dir_dentry->d_lock); + if (match) { + err = vfs_path_lookup(lower_dir_dentry, + lower_dir_mnt, + match->d_name.name, 0, + &lower_path); + dput(match); + } + } +#endif + /* no error: handle positive dentries */ if (!err) { /* check if the dentry is an obb dentry diff --git a/fs/sdfat/Kconfig b/fs/sdfat/Kconfig index a9fba01c420a..08b12f7f768b 100644 --- a/fs/sdfat/Kconfig +++ b/fs/sdfat/Kconfig @@ -68,18 +68,15 @@ config SDFAT_CHECK_RO_ATTR config SDFAT_ALIGNED_MPAGE_WRITE bool "Enable supporting aligned mpage_write" - default y if SDFAT_FS=y - default n if SDFAT_FS=m + default y depends on SDFAT_FS config SDFAT_VIRTUAL_XATTR bool "Virtual xattr support for sdFAT" - default n + default y depends on SDFAT_FS help - If you enable this feature, it supports virtual xattr. - This feature will be deprecated because it might be the same with - "context" mount option. + To support virtual xattr. 
config SDFAT_VIRTUAL_XATTR_SELINUX_LABEL string "Default string for SELinux label" diff --git a/fs/sdfat/amap_smart.c b/fs/sdfat/amap_smart.c index 8bfb22481374..b556f868d76e 100644 --- a/fs/sdfat/amap_smart.c +++ b/fs/sdfat/amap_smart.c @@ -708,8 +708,7 @@ static inline AU_INFO_T *amap_get_packing_au(AMAP_T *amap, int dest, int num_to_ } } - if ((PACKING_HARDLIMIT != 0) && - amap->n_need_packing >= PACKING_HARDLIMIT) { + if ((PACKING_HARDLIMIT) && amap->n_need_packing >= PACKING_HARDLIMIT) { /* Compulsory SLC flushing: * If there was no chance to do best-fit packing * and the # of AU-aligned allocation exceeds HARD threshold, diff --git a/fs/sdfat/blkdev.c b/fs/sdfat/blkdev.c index 788b7c034afc..264c670df0f0 100644 --- a/fs/sdfat/blkdev.c +++ b/fs/sdfat/blkdev.c @@ -96,6 +96,7 @@ s32 bdev_check_bdi_valid(struct super_block *sb) fsi->prev_eio |= SDFAT_EIO_BDI; sdfat_log_msg(sb, KERN_ERR, "%s: block device is " "eliminated.(bdi:%p)", __func__, sb->s_bdi); + sdfat_debug_warn_on(1); } return -ENXIO; } @@ -103,13 +104,18 @@ s32 bdev_check_bdi_valid(struct super_block *sb) return 0; } -#if IS_BUILTIN(CONFIG_SDFAT_FS) -static void __bdev_readahead(struct super_block *sb, u64 secno, u64 num_secs) + +/* Make a readahead request */ +s32 bdev_readahead(struct super_block *sb, u64 secno, u64 num_secs) { + FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); u32 sects_per_page = (PAGE_SIZE >> sb->s_blocksize_bits); struct blk_plug plug; u64 i; + if (!fsi->bd_opened) + return -EIO; + blk_start_plug(&plug); for (i = 0; i < num_secs; i++) { if (i && !(i & (sects_per_page - 1))) @@ -117,26 +123,6 @@ static void __bdev_readahead(struct super_block *sb, u64 secno, u64 num_secs) sb_breadahead(sb, (sector_t)(secno + i)); } blk_finish_plug(&plug); -} -#else -static void __bdev_readahead(struct super_block *sb, u64 secno, u64 num_secs) -{ - u64 i; - - for (i = 0; i < num_secs; i++) - sb_breadahead(sb, (sector_t)(secno + i)); -} -#endif - -/* Make a readahead request */ -s32 bdev_readahead(struct 
super_block *sb, u64 secno, u64 num_secs) -{ - FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); - - if (!fsi->bd_opened) - return -EIO; - - __bdev_readahead(sb, secno, num_secs); return 0; } diff --git a/fs/sdfat/core.c b/fs/sdfat/core.c index 51d2ea23b47e..3a5af0b83d59 100644 --- a/fs/sdfat/core.c +++ b/fs/sdfat/core.c @@ -158,7 +158,7 @@ static s32 __fs_set_vol_flags(struct super_block *sb, u16 new_flag, s32 always_s /* skip updating volume dirty flag, * if this volume has been mounted with read-only */ - if (sb_rdonly(sb)) + if (sb->s_flags & MS_RDONLY) return 0; if (!fsi->pbr_bh) { @@ -177,8 +177,7 @@ static s32 __fs_set_vol_flags(struct super_block *sb, u16 new_flag, s32 always_s bpb->bsx.state = new_flag & VOL_DIRTY ? FAT_VOL_DIRTY : 0x00; } else { /* FAT16/12 */ pbr16_t *bpb = (pbr16_t *) fsi->pbr_bh->b_data; - bpb->bpb.f16.state = new_flag & VOL_DIRTY ? - FAT_VOL_DIRTY : 0x00; + bpb->bpb.state = new_flag & VOL_DIRTY ? FAT_VOL_DIRTY : 0x00; } if (always_sync) @@ -1656,7 +1655,7 @@ static bool is_exfat(pbr_t *pbr) static bool is_fat32(pbr_t *pbr) { - if (le16_to_cpu(pbr->bpb.fat.num_fat_sectors)) + if (le16_to_cpu(pbr->bpb.f16.num_fat_sectors)) return false; return true; } @@ -1669,7 +1668,7 @@ inline pbr_t *read_pbr_with_logical_sector(struct super_block *sb, struct buffer if (is_exfat(p_pbr)) logical_sect = 1 << p_pbr->bsx.f64.sect_size_bits; else - logical_sect = get_unaligned_le16(&p_pbr->bpb.fat.sect_size); + logical_sect = get_unaligned_le16(&p_pbr->bpb.f16.sect_size); /* is x a power of 2? 
* (x) != 0 && (((x) & ((x) - 1)) == 0) @@ -1781,7 +1780,7 @@ s32 fscore_mount(struct super_block *sb) opts->improved_allocation = 0; opts->defrag = 0; ret = mount_exfat(sb, p_pbr); - } else { + } else if (is_fat32(p_pbr)) { if (opts->fs_type && opts->fs_type != FS_TYPE_VFAT) { sdfat_log_msg(sb, KERN_ERR, "not specified filesystem type " @@ -1792,14 +1791,21 @@ s32 fscore_mount(struct super_block *sb) } /* set maximum file size for FAT */ sb->s_maxbytes = 0xffffffff; - - if (is_fat32(p_pbr)) { - ret = mount_fat32(sb, p_pbr); - } else { - opts->improved_allocation = 0; - opts->defrag = 0; - ret = mount_fat16(sb, p_pbr); + ret = mount_fat32(sb, p_pbr); + } else { + if (opts->fs_type && opts->fs_type != FS_TYPE_VFAT) { + sdfat_log_msg(sb, KERN_ERR, + "not specified filesystem type " + "(media:vfat, opts:%s)", + FS_TYPE_STR[opts->fs_type]); + ret = -EINVAL; + goto free_bh; } + /* set maximum file size for FAT */ + sb->s_maxbytes = 0xffffffff; + opts->improved_allocation = 0; + opts->defrag = 0; + ret = mount_fat16(sb, p_pbr); } free_bh: brelse(tmp_bh); @@ -1811,9 +1817,8 @@ free_bh: /* warn misaligned data data start sector must be a multiple of clu_size */ sdfat_log_msg(sb, KERN_INFO, "detected volume info : %s " - "(%04hX-%04hX, bps : %lu, spc : %u, data start : %llu, %s)", + "(bps : %lu, spc : %u, data start : %llu, %s)", sdfat_get_vol_type_str(fsi->vol_type), - (fsi->vol_id >> 16) & 0xffff, fsi->vol_id & 0xffff, sb->s_blocksize, fsi->sect_per_clus, fsi->data_start_sector, (fsi->data_start_sector & (fsi->sect_per_clus - 1)) ? 
"misaligned" : "aligned"); @@ -2369,7 +2374,7 @@ s32 fscore_write_link(struct inode *inode, FILE_ID_T *fid, void *buffer, u64 cou ep2 = ep; } - fsi->fs_func->set_entry_time(ep, tm_now(inode, &tm), TM_MODIFY); + fsi->fs_func->set_entry_time(ep, tm_now(SDFAT_SB(sb), &tm), TM_MODIFY); fsi->fs_func->set_entry_attr(ep, fid->attr); if (modified) { @@ -2576,7 +2581,7 @@ s32 fscore_truncate(struct inode *inode, u64 old_size, u64 new_size) ep2 = ep; } - fsi->fs_func->set_entry_time(ep, tm_now(inode, &tm), TM_MODIFY); + fsi->fs_func->set_entry_time(ep, tm_now(SDFAT_SB(sb), &tm), TM_MODIFY); fsi->fs_func->set_entry_attr(ep, fid->attr); /* diff --git a/fs/sdfat/core.h b/fs/sdfat/core.h index a03f8c0a168a..1f8ed5a28ef3 100644 --- a/fs/sdfat/core.h +++ b/fs/sdfat/core.h @@ -60,15 +60,7 @@ typedef struct { void *__buf; // __buf should be the last member } ENTRY_SET_CACHE_T; -/*----------------------------------------------------------------------*/ -/* Inline Functions */ -/*----------------------------------------------------------------------*/ -static inline bool is_valid_clus(FS_INFO_T *fsi, u32 clus) -{ - if (clus < CLUS_BASE || fsi->num_clusters <= clus) - return false; - return true; -} + /*----------------------------------------------------------------------*/ /* External Function Declarations */ diff --git a/fs/sdfat/core_exfat.c b/fs/sdfat/core_exfat.c index d5f8ad335c5b..9e4b994d0e90 100644 --- a/fs/sdfat/core_exfat.c +++ b/fs/sdfat/core_exfat.c @@ -220,27 +220,20 @@ static void exfat_set_entry_size(DENTRY_T *p_entry, u64 size) ep->size = cpu_to_le64(size); } /* end of exfat_set_entry_size */ - -#define TENS_MS_PER_SEC (100) -#define SEC_TO_TENS_MS(sec) (((sec) & 0x01) ? TENS_MS_PER_SEC : 0) -#define TENS_MS_TO_SEC(tens_ms) (((tens_ms) / TENS_MS_PER_SEC) ? 
1 : 0) - static void exfat_get_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, u8 mode) { - u16 t = 0x00, d = 0x21, tz = 0x00, s = 0x00; + u16 t = 0x00, d = 0x21, tz = 0x00; FILE_DENTRY_T *ep = (FILE_DENTRY_T *)p_entry; switch (mode) { case TM_CREATE: t = le16_to_cpu(ep->create_time); d = le16_to_cpu(ep->create_date); - s = TENS_MS_TO_SEC(ep->create_time_ms); tz = ep->create_tz; break; case TM_MODIFY: t = le16_to_cpu(ep->modify_time); d = le16_to_cpu(ep->modify_date); - s = TENS_MS_TO_SEC(ep->modify_time_ms); tz = ep->modify_tz; break; case TM_ACCESS: @@ -251,7 +244,7 @@ static void exfat_get_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, u8 mode) } tp->tz.value = tz; - tp->sec = ((t & 0x001F) << 1) + s; + tp->sec = (t & 0x001F) << 1; tp->min = (t >> 5) & 0x003F; tp->hour = (t >> 11); tp->day = (d & 0x001F); @@ -270,14 +263,12 @@ static void exfat_set_entry_time(DENTRY_T *p_entry, TIMESTAMP_T *tp, u8 mode) switch (mode) { case TM_CREATE: ep->create_time = cpu_to_le16(t); - ep->create_time_ms = SEC_TO_TENS_MS(tp->sec); ep->create_date = cpu_to_le16(d); ep->create_tz = tp->tz.value; break; case TM_MODIFY: ep->modify_time = cpu_to_le16(t); ep->modify_date = cpu_to_le16(d); - ep->modify_time_ms = (tp->sec & 0x1) ? 
TENS_MS_PER_SEC : 0; ep->modify_tz = tp->tz.value; break; case TM_ACCESS: @@ -295,10 +286,12 @@ static void __init_file_entry(struct super_block *sb, FILE_DENTRY_T *ep, u32 typ exfat_set_entry_type((DENTRY_T *) ep, type); - tp = tm_now_sb(sb, &tm); + tp = tm_now(SDFAT_SB(sb), &tm); exfat_set_entry_time((DENTRY_T *) ep, tp, TM_CREATE); exfat_set_entry_time((DENTRY_T *) ep, tp, TM_MODIFY); exfat_set_entry_time((DENTRY_T *) ep, tp, TM_ACCESS); + ep->create_time_ms = 0; + ep->modify_time_ms = 0; } /* end of __init_file_entry */ static void __init_strm_entry(STRM_DENTRY_T *ep, u8 flags, u32 start_clu, u64 size) @@ -1286,7 +1279,7 @@ static s32 exfat_free_cluster(struct super_block *sb, CHAIN_T *p_chain, s32 do_r } /* check cluster validation */ - if (!is_valid_clus(fsi, p_chain->dir)) { + if ((p_chain->dir < 2) && (p_chain->dir >= fsi->num_clusters)) { EMSG("%s: invalid start cluster (%u)\n", __func__, p_chain->dir); sdfat_debug_bug_on(1); return -EIO; @@ -1374,13 +1367,9 @@ static s32 exfat_alloc_cluster(struct super_block *sb, u32 num_alloc, CHAIN_T *p } /* check cluster validation */ - if (!is_valid_clus(fsi, hint_clu)) { - /* "last + 1" can be passed as hint_clu. 
Otherwise, bug_on */ - if (hint_clu != fsi->num_clusters) { - EMSG("%s: hint_cluster is invalid (%u)\n", - __func__, hint_clu); - sdfat_debug_bug_on(1); - } + if ((hint_clu < CLUS_BASE) && (hint_clu >= fsi->num_clusters)) { + EMSG("%s: hint_cluster is invalid (%u)\n", __func__, hint_clu); + ASSERT(0); hint_clu = CLUS_BASE; if (p_chain->flags == 0x03) { if (exfat_chain_cont_cluster(sb, p_chain->dir, num_clusters)) @@ -1519,53 +1508,43 @@ s32 mount_exfat(struct super_block *sb, pbr_t *p_pbr) pbr64_t *p_bpb = (pbr64_t *)p_pbr; FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); - fsi->sect_per_clus = 1 << p_bpb->bsx.sect_per_clus_bits; - fsi->sect_per_clus_bits = p_bpb->bsx.sect_per_clus_bits; - fsi->cluster_size_bits = fsi->sect_per_clus_bits + sb->s_blocksize_bits; - fsi->cluster_size = 1 << fsi->cluster_size_bits; - if (!p_bpb->bsx.num_fats) { sdfat_msg(sb, KERN_ERR, "bogus number of FAT structure"); return -EINVAL; } - if (p_bpb->bsx.num_fats >= 2) { - sdfat_msg(sb, KERN_WARNING, - "unsupported number of FAT structure :%u, try with 1", - p_bpb->bsx.num_fats); - } + fsi->sect_per_clus = 1 << p_bpb->bsx.sect_per_clus_bits; + fsi->sect_per_clus_bits = p_bpb->bsx.sect_per_clus_bits; + fsi->cluster_size_bits = fsi->sect_per_clus_bits + sb->s_blocksize_bits; + fsi->cluster_size = 1 << fsi->cluster_size_bits; fsi->num_FAT_sectors = le32_to_cpu(p_bpb->bsx.fat_length); - if (!fsi->num_FAT_sectors) { - sdfat_msg(sb, KERN_ERR, "bogus fat size"); - return -EINVAL; - } fsi->FAT1_start_sector = le32_to_cpu(p_bpb->bsx.fat_offset); - fsi->FAT2_start_sector = fsi->FAT1_start_sector; + if (p_bpb->bsx.num_fats == 1) + fsi->FAT2_start_sector = fsi->FAT1_start_sector; + else + fsi->FAT2_start_sector = fsi->FAT1_start_sector + fsi->num_FAT_sectors; fsi->root_start_sector = le32_to_cpu(p_bpb->bsx.clu_offset); fsi->data_start_sector = fsi->root_start_sector; fsi->num_sectors = le64_to_cpu(p_bpb->bsx.vol_length); - if (!fsi->num_sectors) { - sdfat_msg(sb, KERN_ERR, "bogus number of total sector 
count"); - return -EINVAL; - } - + fsi->num_clusters = le32_to_cpu(p_bpb->bsx.clu_count) + 2; /* because the cluster index starts with 2 */ - fsi->num_clusters = le32_to_cpu(p_bpb->bsx.clu_count) + CLUS_BASE; + fsi->vol_type = EXFAT; fsi->vol_id = le32_to_cpu(p_bpb->bsx.vol_serial); + fsi->root_dir = le32_to_cpu(p_bpb->bsx.root_cluster); fsi->dentries_in_root = 0; fsi->dentries_per_clu = 1 << (fsi->cluster_size_bits - DENTRY_SIZE_BITS); + fsi->vol_flag = (u32) le16_to_cpu(p_bpb->bsx.vol_flags); fsi->clu_srch_ptr = CLUS_BASE; fsi->used_clusters = (u32) ~0; fsi->fs_func = &exfat_fs_func; - fsi->vol_type = EXFAT; fat_ent_ops_init(sb); if (p_bpb->bsx.vol_flags & VOL_DIRTY) { diff --git a/fs/sdfat/core_fat.c b/fs/sdfat/core_fat.c index 23c134fc5541..5e0a196ae42b 100644 --- a/fs/sdfat/core_fat.c +++ b/fs/sdfat/core_fat.c @@ -176,7 +176,7 @@ static s32 fat_free_cluster(struct super_block *sb, CHAIN_T *p_chain, s32 do_rel } /* check cluster validation */ - if (!is_valid_clus(fsi, p_chain->dir)) { + if ((p_chain->dir < 2) && (p_chain->dir >= fsi->num_clusters)) { EMSG("%s: invalid start cluster (%u)\n", __func__, p_chain->dir); sdfat_debug_bug_on(1); return -EIO; @@ -479,7 +479,7 @@ static void __init_dos_entry(struct super_block *sb, DOS_DENTRY_T *ep, u32 type, ep->start_clu_hi = cpu_to_le16(CLUSTER_16(start_clu >> 16)); ep->size = 0; - tp = tm_now_sb(sb, &tm); + tp = tm_now(SDFAT_SB(sb), &tm); fat_set_entry_time((DENTRY_T *) ep, tp, TM_CREATE); fat_set_entry_time((DENTRY_T *) ep, tp, TM_MODIFY); ep->access_date = 0; @@ -1238,70 +1238,36 @@ static FS_FUNC_T amap_fat_fs_func = { .get_au_stat = amap_get_au_stat, }; -static s32 mount_fat_common(struct super_block *sb, FS_INFO_T *fsi, - bpb_t *p_bpb, u32 root_sects) +s32 mount_fat16(struct super_block *sb, pbr_t *p_pbr) { - bool fat32 = root_sects == 0 ? 
true : false; + s32 num_root_sectors; + bpb16_t *p_bpb = &(p_pbr->bpb.f16); + FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); - fsi->sect_per_clus = p_bpb->sect_per_clus; - if (!is_power_of_2(fsi->sect_per_clus)) { - sdfat_msg(sb, KERN_ERR, "bogus sectors per cluster %u", - fsi->sect_per_clus); + if (!p_bpb->num_fats) { + sdfat_msg(sb, KERN_ERR, "bogus number of FAT structure"); return -EINVAL; } + num_root_sectors = get_unaligned_le16(p_bpb->num_root_entries) << DENTRY_SIZE_BITS; + num_root_sectors = ((num_root_sectors-1) >> sb->s_blocksize_bits) + 1; + + fsi->sect_per_clus = p_bpb->sect_per_clus; fsi->sect_per_clus_bits = ilog2(p_bpb->sect_per_clus); fsi->cluster_size_bits = fsi->sect_per_clus_bits + sb->s_blocksize_bits; fsi->cluster_size = 1 << fsi->cluster_size_bits; - fsi->dentries_per_clu = 1 << - (fsi->cluster_size_bits - DENTRY_SIZE_BITS); - - fsi->vol_flag = VOL_CLEAN; - fsi->clu_srch_ptr = CLUS_BASE; - fsi->used_clusters = (u32)~0; - fsi->fs_func = &fat_fs_func; fsi->num_FAT_sectors = le16_to_cpu(p_bpb->num_fat_sectors); - if (fat32) { - u32 fat32_len = le32_to_cpu(p_bpb->f32.num_fat32_sectors); - - if (fat32_len) { - fsi->num_FAT_sectors = fat32_len; - } else if (fsi->num_FAT_sectors) { - /* SPEC violation for compatibility */ - sdfat_msg(sb, KERN_WARNING, - "no fatsz32, try with fatsz16: %u", - fsi->num_FAT_sectors); - } - } - - if (!fsi->num_FAT_sectors) { - sdfat_msg(sb, KERN_ERR, "bogus fat size"); - return -EINVAL; - } - - if (!p_bpb->num_fats) { - sdfat_msg(sb, KERN_ERR, "bogus number of FAT structure"); - return -EINVAL; - } - - if (p_bpb->num_fats > 2) { - sdfat_msg(sb, KERN_WARNING, - "unsupported number of FAT structure :%u, try with 2", - p_bpb->num_fats); - } fsi->FAT1_start_sector = le16_to_cpu(p_bpb->num_reserved); if (p_bpb->num_fats == 1) fsi->FAT2_start_sector = fsi->FAT1_start_sector; else - fsi->FAT2_start_sector = fsi->FAT1_start_sector + - fsi->num_FAT_sectors; + fsi->FAT2_start_sector = fsi->FAT1_start_sector + fsi->num_FAT_sectors; 
fsi->root_start_sector = fsi->FAT2_start_sector + fsi->num_FAT_sectors; - fsi->data_start_sector = fsi->root_start_sector + root_sects; + fsi->data_start_sector = fsi->root_start_sector + num_root_sectors; - /* SPEC violation for compatibility */ fsi->num_sectors = get_unaligned_le16(p_bpb->num_sectors); if (!fsi->num_sectors) fsi->num_sectors = le32_to_cpu(p_bpb->num_huge_sectors); @@ -1311,20 +1277,15 @@ static s32 mount_fat_common(struct super_block *sb, FS_INFO_T *fsi, return -EINVAL; } + fsi->num_clusters = (u32)((fsi->num_sectors - fsi->data_start_sector) >> fsi->sect_per_clus_bits) + CLUS_BASE; /* because the cluster index starts with 2 */ - fsi->num_clusters = (u32)((fsi->num_sectors - fsi->data_start_sector) >> - fsi->sect_per_clus_bits) + CLUS_BASE; - return 0; -} + fsi->vol_type = FAT16; + if (fsi->num_clusters < FAT12_THRESHOLD) + fsi->vol_type = FAT12; -s32 mount_fat16(struct super_block *sb, pbr_t *p_pbr) -{ - u32 num_root_sectors; - bpb_t *p_bpb = &(p_pbr->bpb.fat); - FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); + fsi->vol_id = get_unaligned_le32(p_bpb->vol_serial); - fsi->vol_id = get_unaligned_le32(p_bpb->f16.vol_serial); fsi->root_dir = 0; fsi->dentries_in_root = get_unaligned_le16(p_bpb->num_root_entries); if (!fsi->dentries_in_root) { @@ -1333,18 +1294,16 @@ s32 mount_fat16(struct super_block *sb, pbr_t *p_pbr) return -EINVAL; } - num_root_sectors = fsi->dentries_in_root << DENTRY_SIZE_BITS; - num_root_sectors = ((num_root_sectors - 1) >> sb->s_blocksize_bits) + 1; + fsi->dentries_per_clu = 1 << (fsi->cluster_size_bits - DENTRY_SIZE_BITS); - if (mount_fat_common(sb, fsi, p_bpb, num_root_sectors)) - return -EINVAL; + fsi->vol_flag = VOL_CLEAN; + fsi->clu_srch_ptr = 2; + fsi->used_clusters = (u32) ~0; - fsi->vol_type = FAT16; - if (fsi->num_clusters < FAT12_THRESHOLD) - fsi->vol_type = FAT12; + fsi->fs_func = &fat_fs_func; fat_ent_ops_init(sb); - if (p_bpb->f16.state & FAT_VOL_DIRTY) { + if (p_bpb->state & FAT_VOL_DIRTY) { fsi->vol_flag |= VOL_DIRTY; 
sdfat_log_msg(sb, KERN_WARNING, "Volume was not properly " "unmounted. Some data may be corrupt. " @@ -1388,26 +1347,67 @@ s32 mount_fat32(struct super_block *sb, pbr_t *p_pbr) pbr32_t *p_bpb = (pbr32_t *)p_pbr; FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); - fsi->vol_id = get_unaligned_le32(p_bpb->bsx.vol_serial); - fsi->root_dir = le32_to_cpu(p_bpb->bpb.f32.root_cluster); - fsi->dentries_in_root = 0; + if (!p_bpb->bpb.num_fats) { + sdfat_msg(sb, KERN_ERR, "bogus number of FAT structure"); + return -EINVAL; + } + + fsi->sect_per_clus = p_bpb->bpb.sect_per_clus; + fsi->sect_per_clus_bits = ilog2(p_bpb->bpb.sect_per_clus); + fsi->cluster_size_bits = fsi->sect_per_clus_bits + sb->s_blocksize_bits; + fsi->cluster_size = 1 << fsi->cluster_size_bits; + + fsi->num_FAT_sectors = le32_to_cpu(p_bpb->bpb.num_fat32_sectors); + + fsi->FAT1_start_sector = le16_to_cpu(p_bpb->bpb.num_reserved); + if (p_bpb->bpb.num_fats == 1) + fsi->FAT2_start_sector = fsi->FAT1_start_sector; + else + fsi->FAT2_start_sector = fsi->FAT1_start_sector + fsi->num_FAT_sectors; + + fsi->root_start_sector = fsi->FAT2_start_sector + fsi->num_FAT_sectors; + fsi->data_start_sector = fsi->root_start_sector; + + /* SPEC violation for compatibility */ + fsi->num_sectors = get_unaligned_le16(p_bpb->bpb.num_sectors); + if (!fsi->num_sectors) + fsi->num_sectors = le32_to_cpu(p_bpb->bpb.num_huge_sectors); - if (mount_fat_common(sb, fsi, &p_bpb->bpb, 0)) + /* 2nd check */ + if (!fsi->num_sectors) { + sdfat_msg(sb, KERN_ERR, "bogus number of total sector count"); return -EINVAL; + } + + fsi->num_clusters = (u32)((fsi->num_sectors - fsi->data_start_sector) >> fsi->sect_per_clus_bits) + CLUS_BASE; + /* because the cluster index starts with 2 */ - /* Should be initialized before calling amap_create() */ fsi->vol_type = FAT32; - fat_ent_ops_init(sb); + fsi->vol_id = get_unaligned_le32(p_bpb->bsx.vol_serial); + + fsi->root_dir = le32_to_cpu(p_bpb->bpb.root_cluster); + fsi->dentries_in_root = 0; + fsi->dentries_per_clu = 1 << 
(fsi->cluster_size_bits - DENTRY_SIZE_BITS); + + fsi->vol_flag = VOL_CLEAN; + fsi->clu_srch_ptr = 2; + fsi->used_clusters = (u32) ~0; + + fsi->fs_func = &fat_fs_func; /* Delayed / smart allocation related init */ fsi->reserved_clusters = 0; + /* Should be initialized before calling amap_create() */ + fat_ent_ops_init(sb); + /* AU Map Creation */ if (SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_SMART) { u32 hidden_sectors = le32_to_cpu(p_bpb->bpb.num_hid_sectors); u32 calc_hid_sect = 0; int ret; + /* calculate hidden sector size */ calc_hid_sect = __calc_hidden_sect(sb); if (calc_hid_sect != hidden_sectors) { diff --git a/fs/sdfat/fatent.c b/fs/sdfat/fatent.c index 442a3797fd6e..fca32a50d336 100644 --- a/fs/sdfat/fatent.c +++ b/fs/sdfat/fatent.c @@ -355,6 +355,13 @@ static inline bool is_reserved_clus(u32 clus) return false; } +static inline bool is_valid_clus(FS_INFO_T *fsi, u32 clus) +{ + if (clus < CLUS_BASE || fsi->num_clusters <= clus) + return false; + return true; +} + s32 fat_ent_get(struct super_block *sb, u32 loc, u32 *content) { FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); diff --git a/fs/sdfat/misc.c b/fs/sdfat/misc.c index f657389e156e..a006e898816f 100644 --- a/fs/sdfat/misc.c +++ b/fs/sdfat/misc.c @@ -55,6 +55,15 @@ /************************************************************************* * FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY *************************************************************************/ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) +#define CURRENT_TIME_SEC timespec64_trunc(current_kernel_time64(), NSEC_PER_SEC) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) +#define CURRENT_TIME_SEC timespec_trunc(current_kernel_time(), NSEC_PER_SEC) +#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) */ + /* EMPTY */ +#endif + + #ifdef CONFIG_SDFAT_UEVENT static struct kobject sdfat_uevent_kobj; @@ -85,10 +94,6 @@ void sdfat_uevent_ro_remount(struct super_block *sb) char major[16], minor[16]; char *envp[] = { major, 
minor, NULL }; - /* Do not trigger uevent if a device has been ejected */ - if (fsapi_check_bdi_valid(sb)) - return; - snprintf(major, sizeof(major), "MAJOR=%d", MAJOR(bd_dev)); snprintf(minor, sizeof(minor), "MINOR=%d", MINOR(bd_dev)); @@ -122,7 +127,7 @@ void __sdfat_fs_error(struct super_block *sb, int report, const char *fmt, ...) pr_err("[SDFAT](%s[%d:%d]):ERR: %pV\n", sb->s_id, MAJOR(bd_dev), MINOR(bd_dev), &vaf); #ifdef CONFIG_SDFAT_SUPPORT_STLOG - if (opts->errors == SDFAT_ERRORS_RO && !sb_rdonly(sb)) { + if (opts->errors == SDFAT_ERRORS_RO && !(sb->s_flags & MS_RDONLY)) { ST_LOG("[SDFAT](%s[%d:%d]):ERR: %pV\n", sb->s_id, MAJOR(bd_dev), MINOR(bd_dev), &vaf); } @@ -133,8 +138,8 @@ void __sdfat_fs_error(struct super_block *sb, int report, const char *fmt, ...) if (opts->errors == SDFAT_ERRORS_PANIC) { panic("[SDFAT](%s[%d:%d]): fs panic from previous error\n", sb->s_id, MAJOR(bd_dev), MINOR(bd_dev)); - } else if (opts->errors == SDFAT_ERRORS_RO && !sb_rdonly(sb)) { - sb->s_flags |= SB_RDONLY; + } else if (opts->errors == SDFAT_ERRORS_RO && !(sb->s_flags & MS_RDONLY)) { + sb->s_flags |= MS_RDONLY; sdfat_statistics_set_mnt_ro(); pr_err("[SDFAT](%s[%d:%d]): Filesystem has been set " "read-only\n", sb->s_id, MAJOR(bd_dev), MINOR(bd_dev)); @@ -328,12 +333,12 @@ void sdfat_time_unix2fat(struct sdfat_sb_info *sbi, sdfat_timespec_t *ts, tp->Year = year; } -TIMESTAMP_T *tm_now(struct inode *inode, TIMESTAMP_T *tp) +TIMESTAMP_T *tm_now(struct sdfat_sb_info *sbi, TIMESTAMP_T *tp) { - sdfat_timespec_t ts = current_time(inode); + sdfat_timespec_t ts = CURRENT_TIME_SEC; DATE_TIME_T dt; - sdfat_time_unix2fat(SDFAT_SB(inode->i_sb), &ts, &dt); + sdfat_time_unix2fat(sbi, &ts, &dt); tp->year = dt.Year; tp->mon = dt.Month; diff --git a/fs/sdfat/mpage.c b/fs/sdfat/mpage.c index 4f5037768fa5..f550fbb2204a 100644 --- a/fs/sdfat/mpage.c +++ b/fs/sdfat/mpage.c @@ -70,9 +70,6 @@ #ifdef CONFIG_SDFAT_ALIGNED_MPAGE_WRITE -#define MIN_ALIGNED_SIZE (PAGE_SIZE) -#define 
MIN_ALIGNED_SIZE_MASK (MIN_ALIGNED_SIZE - 1) - /************************************************************************* * INNER FUNCTIONS FOR FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY *************************************************************************/ @@ -100,14 +97,6 @@ static inline void __sdfat_clean_bdev_aliases(struct block_device *bdev, sector_ { unmap_underlying_metadata(bdev, block); } - -static inline int wbc_to_write_flags(struct writeback_control *wbc) -{ - if (wbc->sync_mode == WB_SYNC_ALL) - return WRITE_SYNC; - - return 0; -} #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) @@ -247,9 +236,6 @@ static inline unsigned int __calc_size_to_align(struct super_block *sb) if (aligned && (max_sectors & (aligned - 1))) aligned = 0; - - if (aligned && aligned < (MIN_ALIGNED_SIZE >> SECTOR_SIZE_BITS)) - aligned = 0; out: return aligned; } @@ -262,24 +248,6 @@ struct mpage_data { unsigned int size_to_align; }; -/* - * After completing I/O on a page, call this routine to update the page - * flags appropriately - */ -static void __page_write_endio(struct page *page, int err) -{ - if (err) { - struct address_space *mapping; - - SetPageError(page); - mapping = page_mapping(page); - if (mapping) - mapping_set_error(mapping, err); - } - __dfr_writepage_end_io(page); - end_page_writeback(page); -} - /* * I/O completion handler for multipage BIOs. 
* @@ -294,37 +262,25 @@ static void __page_write_endio(struct page *page, int err) */ static void __mpage_write_end_io(struct bio *bio, int err) { - struct bio_vec *bv; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0) - struct bvec_iter_all iter_all; + struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; ASSERT(bio_data_dir(bio) == WRITE); /* only write */ - /* Use bio_for_each_segemnt_all() to support multi-page bvec */ - bio_for_each_segment_all(bv, bio, iter_all) - __page_write_endio(bv->bv_page, err); -#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0) - struct bvec_iter_all iter_all; - int i; - - ASSERT(bio_data_dir(bio) == WRITE); /* only write */ - - /* Use bio_for_each_segemnt_all() to support multi-page bvec */ - bio_for_each_segment_all(bv, bio, i, iter_all) - __page_write_endio(bv->bv_page, err); -#else - ASSERT(bio_data_dir(bio) == WRITE); /* only write */ - bv = bio->bi_io_vec + bio->bi_vcnt - 1; - do { - struct page *page = bv->bv_page; + struct page *page = bvec->bv_page; + + if (--bvec >= bio->bi_io_vec) + prefetchw(&bvec->bv_page->flags); + if (err) { + SetPageError(page); + if (page->mapping) + mapping_set_error(page->mapping, err); + } - if (--bv >= bio->bi_io_vec) - prefetchw(&bv->bv_page->flags); + __dfr_writepage_end_io(page); - __page_write_endio(page, err); - } while (bv >= bio->bi_io_vec); -#endif + end_page_writeback(page); + } while (bvec >= bio->bi_io_vec); bio_put(bio); } @@ -356,65 +312,6 @@ mpage_alloc(struct block_device *bdev, return bio; } - -#if IS_BUILTIN(CONFIG_SDFAT_FS) -#define __write_boundary_block write_boundary_block -#define sdfat_buffer_heads_over_limit buffer_heads_over_limit -#else - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0) -/* - * Called when we've recently written block `bblock', and it is known that - * `bblock' was for a buffer_boundary() buffer. This means that the block at - * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's - * dirty, schedule it for IO. 
So that indirects merge nicely with their data. - */ -static void __write_boundary_block(struct block_device *bdev, - sector_t bblock, unsigned int blocksize) -{ - struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); - - if (bh) { - if (buffer_dirty(bh)) - ll_rw_block(REQ_OP_WRITE, 0, 1, &bh); - put_bh(bh); - } -} -#else -#warning "Need an alternative of write_boundary_block function" -#define __write_boundary_block write_boundary_block -#endif - -#warning "sdfat could not check buffer_heads_over_limit on module. Assumed zero" -#define sdfat_buffer_heads_over_limit (0) -#endif - -static void clean_buffers(struct page *page, unsigned int first_unmapped) -{ - unsigned int buffer_counter = 0; - struct buffer_head *bh, *head; - - if (!page_has_buffers(page)) - return; - head = page_buffers(page); - bh = head; - - do { - if (buffer_counter++ == first_unmapped) - break; - clear_buffer_dirty(bh); - bh = bh->b_this_page; - } while (bh != head); - - /* - * we cannot drop the bh if the page is not uptodate or a concurrent - * readpage would fail to serialize with the bh and it would read from - * disk before we reach the platter. 
- */ - if (sdfat_buffer_heads_over_limit && PageUptodate(page)) - try_to_free_buffers(page); -} - static int sdfat_mpage_writepage(struct page *page, struct writeback_control *wbc, void *data) { @@ -438,7 +335,6 @@ static int sdfat_mpage_writepage(struct page *page, loff_t i_size = i_size_read(inode); unsigned long end_index = i_size >> PAGE_SHIFT; int ret = 0; - int op_flags = wbc_to_write_flags(wbc); if (page_has_buffers(page)) { struct buffer_head *head = page_buffers(page); @@ -594,25 +490,22 @@ page_is_mapped: */ if (bio) { if (mpd->last_block_in_bio != blocks[0] - 1) { - bio = mpage_bio_submit_write(op_flags, bio); + bio = mpage_bio_submit_write(0, bio); } else if (mpd->size_to_align) { unsigned int mask = mpd->size_to_align - 1; sector_t max_end_block = (__sdfat_bio_sector(bio) & ~(mask)) + mask; - if ((__sdfat_bio_size(bio) & MIN_ALIGNED_SIZE_MASK) && + if ((__sdfat_bio_size(bio) != (1 << (mask + 1))) && (mpd->last_block_in_bio == max_end_block)) { - int op_nomerge = op_flags | REQ_NOMERGE; - MMSG("%s(inode:%p) alignment mpage_bio_submit" - "(start:%u, len:%u size:%u aligned:%u)\n", + "(start:%u, len:%u aligned:%u)\n", __func__, inode, (unsigned int)__sdfat_bio_sector(bio), (unsigned int)(mpd->last_block_in_bio - __sdfat_bio_sector(bio) + 1), - (unsigned int)__sdfat_bio_size(bio), (unsigned int)mpd->size_to_align); - bio = mpage_bio_submit_write(op_nomerge, bio); + bio = mpage_bio_submit_write(REQ_NOMERGE, bio); } } } @@ -632,7 +525,7 @@ alloc_new: */ length = first_unmapped << blkbits; if (bio_add_page(bio, page, length, 0) < length) { - bio = mpage_bio_submit_write(op_flags, bio); + bio = mpage_bio_submit_write(0, bio); goto alloc_new; } @@ -640,7 +533,26 @@ alloc_new: * OK, we have our BIO, so we can now mark the buffers clean. Make * sure to only clean buffers which we know we'll be writing. 
*/ - clean_buffers(page, first_unmapped); + if (page_has_buffers(page)) { + struct buffer_head *head = page_buffers(page); + struct buffer_head *bh = head; + unsigned int buffer_counter = 0; + + do { + if (buffer_counter++ == first_unmapped) + break; + clear_buffer_dirty(bh); + bh = bh->b_this_page; + } while (bh != head); + + /* + * we cannot drop the bh if the page is not uptodate + * or a concurrent readpage would fail to serialize with the bh + * and it would read from disk before we reach the platter. + */ + if (buffer_heads_over_limit && PageUptodate(page)) + try_to_free_buffers(page); + } BUG_ON(PageWriteback(page)); set_page_writeback(page); @@ -667,9 +579,9 @@ alloc_new: unlock_page(page); if (boundary || (first_unmapped != blocks_per_page)) { - bio = mpage_bio_submit_write(op_flags, bio); + bio = mpage_bio_submit_write(0, bio); if (boundary_block) { - __write_boundary_block(boundary_bdev, + write_boundary_block(boundary_bdev, boundary_block, 1 << blkbits); } } else { @@ -680,7 +592,7 @@ alloc_new: confused: if (bio) - bio = mpage_bio_submit_write(op_flags, bio); + bio = mpage_bio_submit_write(0, bio); if (mpd->use_writepage) { ret = mapping->a_ops->writepage(page, wbc); @@ -713,11 +625,8 @@ int sdfat_mpage_writepages(struct address_space *mapping, BUG_ON(!get_block); blk_start_plug(&plug); ret = write_cache_pages(mapping, wbc, sdfat_mpage_writepage, &mpd); - if (mpd.bio) { - int op_flags = wbc_to_write_flags(wbc); - - mpage_bio_submit_write(op_flags, mpd.bio); - } + if (mpd.bio) + mpage_bio_submit_write(0, mpd.bio); blk_finish_plug(&plug); return ret; } diff --git a/fs/sdfat/sdfat.c b/fs/sdfat/sdfat.c index 316b4c8364fd..2d9955cfc993 100644 --- a/fs/sdfat/sdfat.c +++ b/fs/sdfat/sdfat.c @@ -190,14 +190,6 @@ static inline void __sdfat_clean_bdev_aliases(struct block_device *bdev, sector_ { unmap_underlying_metadata(bdev, block); } - -static inline int wbc_to_write_flags(struct writeback_control *wbc) -{ - if (wbc->sync_mode == WB_SYNC_ALL) - return 
WRITE_SYNC; - - return 0; -} #endif @@ -232,12 +224,9 @@ static int setattr_prepare(struct dentry *dentry, struct iattr *attr) #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) -static inline void __sdfat_submit_bio_write(struct bio *bio, - struct writeback_control *wbc) +static inline void __sdfat_submit_bio_write(struct bio *bio) { - int write_flags = wbc_to_write_flags(wbc); - - bio_set_op_attrs(bio, REQ_OP_WRITE, write_flags); + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); submit_bio(bio); } @@ -251,12 +240,9 @@ static inline unsigned long __sdfat_init_name_hash(const struct dentry *dentry) return init_name_hash(dentry); } #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) */ -static inline void __sdfat_submit_bio_write(struct bio *bio, - struct writeback_control *wbc) +static inline void __sdfat_submit_bio_write(struct bio *bio) { - int write_flags = wbc_to_write_flags(wbc); - - submit_bio(WRITE | write_flags, bio); + submit_bio(WRITE, bio); } static inline unsigned int __sdfat_full_name_hash(const struct dentry *unused, const char *name, unsigned int len) @@ -303,15 +289,6 @@ static inline int sdfat_remount_syncfs(struct super_block *sb) } #endif -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) - /* EMPTY */ -#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) */ -static inline void truncate_inode_pages_final(struct address_space *mapping) -{ - truncate_inode_pages(mapping, 0); -} -#endif - #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) static inline sector_t __sdfat_bio_sector(struct bio *bio) @@ -927,6 +904,15 @@ static int sdfat_file_fsync(struct file *filp, int datasync) /************************************************************************* * MORE FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY *************************************************************************/ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) +#define CURRENT_TIME_SEC timespec64_trunc(current_kernel_time64(), NSEC_PER_SEC) +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 
12, 0) +#define CURRENT_TIME_SEC timespec_trunc(current_kernel_time(), NSEC_PER_SEC) +#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) */ + /* EMPTY */ +#endif + + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) static void sdfat_writepage_end_io(struct bio *bio) { @@ -2200,7 +2186,7 @@ static int sdfat_dfr_ioctl(struct inode *inode, struct file *filp, __lock_super(sb); /* Check if FS_ERROR occurred */ - if (sb_rdonly(sb)) { + if (sb->s_flags & MS_RDONLY) { dfr_err("RDONLY partition (err %d)", -EPERM); __unlock_super(sb); return -EPERM; @@ -2411,7 +2397,7 @@ static int __sdfat_create(struct inode *dir, struct dentry *dentry) TMSG("%s entered\n", __func__); - ts = current_time(dir); + ts = CURRENT_TIME_SEC; err = fsapi_create(dir, (u8 *) dentry->d_name.name, FM_REGULAR, &fid); if (err) @@ -2574,7 +2560,7 @@ static int sdfat_unlink(struct inode *dir, struct dentry *dentry) TMSG("%s entered\n", __func__); - ts = current_time(dir); + ts = CURRENT_TIME_SEC; SDFAT_I(inode)->fid.size = i_size_read(inode); @@ -2623,7 +2609,7 @@ static int sdfat_symlink(struct inode *dir, struct dentry *dentry, const char *t TMSG("%s entered\n", __func__); - ts = current_time(dir); + ts = CURRENT_TIME_SEC; err = fsapi_create(dir, (u8 *) dentry->d_name.name, FM_SYMLINK, &fid); if (err) @@ -2684,7 +2670,7 @@ static int __sdfat_mkdir(struct inode *dir, struct dentry *dentry) TMSG("%s entered\n", __func__); - ts = current_time(dir); + ts = CURRENT_TIME_SEC; err = fsapi_mkdir(dir, (u8 *) dentry->d_name.name, &fid); if (err) @@ -2733,7 +2719,7 @@ static int sdfat_rmdir(struct inode *dir, struct dentry *dentry) TMSG("%s entered\n", __func__); - ts = current_time(dir); + ts = CURRENT_TIME_SEC; SDFAT_I(inode)->fid.size = i_size_read(inode); @@ -2778,7 +2764,7 @@ static int __sdfat_rename(struct inode *old_dir, struct dentry *old_dentry, old_inode = old_dentry->d_inode; new_inode = new_dentry->d_inode; - ts = current_time(old_inode); + ts = CURRENT_TIME_SEC; SDFAT_I(old_inode)->fid.size = 
i_size_read(old_inode); @@ -2856,7 +2842,7 @@ static int sdfat_cont_expand(struct inode *inode, loff_t size) if (err) return err; - inode->i_ctime = inode->i_mtime = current_time(inode); + inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC; mark_inode_dirty(inode); if (!IS_SYNC(inode)) @@ -3114,7 +3100,7 @@ static void sdfat_truncate(struct inode *inode, loff_t old_size) if (err) goto out; - inode->i_ctime = inode->i_mtime = current_time(inode); + inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC; if (IS_DIRSYNC(inode)) (void) sdfat_sync_inode(inode); else @@ -3542,8 +3528,7 @@ static int sdfat_readpages(struct file *file, struct address_space *mapping, } static inline void sdfat_submit_fullpage_bio(struct block_device *bdev, - sector_t sector, unsigned int length, - struct page *page, struct writeback_control *wbc) + sector_t sector, unsigned int length, struct page *page) { /* Single page bio submit */ struct bio *bio; @@ -3567,7 +3552,7 @@ static inline void sdfat_submit_fullpage_bio(struct block_device *bdev, __sdfat_set_bio_iterate(bio, sector, length, 0, 0); bio->bi_end_io = sdfat_writepage_end_io; - __sdfat_submit_bio_write(bio, wbc); + __sdfat_submit_bio_write(bio); } static int sdfat_writepage(struct page *page, struct writeback_control *wbc) @@ -3702,7 +3687,7 @@ static int sdfat_writepage(struct page *page, struct writeback_control *wbc) sdfat_submit_fullpage_bio(head->b_bdev, head->b_blocknr << (sb->s_blocksize_bits - SECTOR_SIZE_BITS), nr_blocks_towrite << inode->i_blkbits, - page, wbc); + page); unlock_page(page); @@ -3767,7 +3752,7 @@ static int sdfat_check_writable(struct super_block *sb) if (fsapi_check_bdi_valid(sb)) return -EIO; - if (sb_rdonly(sb)) + if (sb->s_flags & MS_RDONLY) return -EROFS; return 0; @@ -3842,7 +3827,7 @@ static int sdfat_write_end(struct file *file, struct address_space *mapping, sdfat_write_failed(mapping, pos+len); if (!(err < 0) && !(fid->attr & ATTR_ARCHIVE)) { - inode->i_mtime = inode->i_ctime = current_time(inode); + 
inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; fid->attr |= ATTR_ARCHIVE; mark_inode_dirty(inode); } @@ -4054,7 +4039,7 @@ static struct inode *sdfat_alloc_inode(struct super_block *sb) return &ei->vfs_inode; } -static void sdfat_free_inode(struct inode *inode) +static void sdfat_destroy_inode(struct inode *inode) { if (SDFAT_I(inode)->target) { kfree(SDFAT_I(inode)->target); @@ -4064,28 +4049,6 @@ static void sdfat_free_inode(struct inode *inode) kmem_cache_free(sdfat_inode_cachep, SDFAT_I(inode)); } -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) -/* Use free_inode instead of destroy_inode */ -#define sdfat_destroy_inode (NULL) -#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) -static void sdfat_i_callback(struct rcu_head *head) -{ - struct inode *inode = container_of(head, struct inode, i_rcu); - - sdfat_free_inode(inode); -} - -static void sdfat_destroy_inode(struct inode *inode) -{ - call_rcu(&inode->i_rcu, sdfat_i_callback); -} -#else -static void sdfat_destroy_inode(struct inode *inode) -{ - sdfat_free_inode(inode); -} -#endif - static int __sdfat_write_inode(struct inode *inode, int sync) { struct super_block *sb = inode->i_sb; @@ -4121,7 +4084,7 @@ static int sdfat_write_inode(struct inode *inode, struct writeback_control *wbc) static void sdfat_evict_inode(struct inode *inode) { - truncate_inode_pages_final(&inode->i_data); + truncate_inode_pages(&inode->i_data, 0); if (!inode->i_nlink) { loff_t old_size = i_size_read(inode); @@ -4149,8 +4112,23 @@ static void sdfat_evict_inode(struct inode *inode) /* remove_inode_hash(inode); */ } -static void sdfat_free_sb_info(struct sdfat_sb_info *sbi) + + +static void sdfat_put_super(struct super_block *sb) { + struct sdfat_sb_info *sbi = SDFAT_SB(sb); + int err; + + sdfat_log_msg(sb, KERN_INFO, "trying to unmount..."); + + __cancel_delayed_work_sync(sbi); + + if (__is_sb_dirty(sb)) + sdfat_write_super(sb); + + __free_dfr_mem_if_required(sb); + err = fsapi_umount(sb); + if (sbi->nls_disk) { 
unload_nls(sbi->nls_disk); sbi->nls_disk = NULL; @@ -4165,63 +4143,14 @@ static void sdfat_free_sb_info(struct sdfat_sb_info *sbi) sbi->options.iocharset = sdfat_default_iocharset; } - if (sbi->use_vmalloc) { - vfree(sbi); - return; - } - kfree(sbi); -} - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) -static void delayed_free(struct rcu_head *p) -{ - struct sdfat_sb_info *sbi = container_of(p, struct sdfat_sb_info, rcu); - - sdfat_free_sb_info(sbi); -} - -static void __sdfat_destroy_sb_info(struct super_block *sb) -{ - struct sdfat_sb_info *sbi = SDFAT_SB(sb); - - call_rcu(&sbi->rcu, delayed_free); -} -#else -static void __sdfat_destroy_sb_info(struct super_block *sb) -{ - struct sdfat_sb_info *sbi = SDFAT_SB(sb); - - sdfat_free_sb_info(sbi); sb->s_fs_info = NULL; -} -#endif - -static void sdfat_destroy_sb_info(struct super_block *sb) -{ - struct sdfat_sb_info *sbi = SDFAT_SB(sb); kobject_del(&sbi->sb_kobj); kobject_put(&sbi->sb_kobj); - - __sdfat_destroy_sb_info(sb); -} - -static void sdfat_put_super(struct super_block *sb) -{ - struct sdfat_sb_info *sbi = SDFAT_SB(sb); - int err; - - sdfat_log_msg(sb, KERN_INFO, "trying to unmount..."); - - __cancel_delayed_work_sync(sbi); - - if (__is_sb_dirty(sb)) - sdfat_write_super(sb); - - __free_dfr_mem_if_required(sb); - err = fsapi_umount(sb); - - sdfat_destroy_sb_info(sb); + if (!sbi->use_vmalloc) + kfree(sbi); + else + vfree(sbi); sdfat_log_msg(sb, KERN_INFO, "unmounted successfully! %s", err ? 
"(with previous I/O errors)" : ""); @@ -4252,7 +4181,7 @@ static void sdfat_write_super(struct super_block *sb) /* flush delayed FAT/DIR dirty */ __flush_delayed_meta(sb, 0); - if (!sb_rdonly(sb)) + if (!(sb->s_flags & MS_RDONLY)) fsapi_sync_fs(sb, 0); __unlock_super(sb); @@ -4380,8 +4309,7 @@ static int sdfat_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_bavail = info.FreeClusters; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); - /* Unicode utf8 255 characters */ - buf->f_namelen = MAX_NAME_LENGTH * MAX_CHARSET_SIZE; + buf->f_namelen = 260; return 0; } @@ -4393,7 +4321,7 @@ static int sdfat_remount(struct super_block *sb, int *flags, char *data) struct sdfat_sb_info *sbi = SDFAT_SB(sb); FS_INFO_T *fsi = &(sbi->fsi); - *flags |= SB_NODIRATIME; + *flags |= MS_NODIRATIME; prev_sb_flags = sb->s_flags; @@ -4402,8 +4330,8 @@ static int sdfat_remount(struct super_block *sb, int *flags, char *data) fsapi_set_vol_flags(sb, VOL_CLEAN, 1); sdfat_log_msg(sb, KERN_INFO, "re-mounted(%s->%s), eio=0x%x, Opts: %s", - (prev_sb_flags & SB_RDONLY) ? "ro" : "rw", - (*flags & SB_RDONLY) ? "ro" : "rw", + (prev_sb_flags & MS_RDONLY) ? "ro" : "rw", + (*flags & MS_RDONLY) ? 
"ro" : "rw", fsi->prev_eio, orig_data); kfree(orig_data); return 0; @@ -4466,11 +4394,7 @@ static int __sdfat_show_options(struct seq_file *m, struct super_block *sb) static const struct super_operations sdfat_sops = { .alloc_inode = sdfat_alloc_inode, -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) - .free_inode = sdfat_free_inode, -#else .destroy_inode = sdfat_destroy_inode, -#endif .write_inode = sdfat_write_inode, .evict_inode = sdfat_evict_inode, .put_super = sdfat_put_super, @@ -4913,7 +4837,7 @@ static int sdfat_read_root(struct inode *inode) FS_INFO_T *fsi = &(sbi->fsi); DIR_ENTRY_T info; - ts = current_time(inode); + ts = CURRENT_TIME_SEC; SDFAT_I(inode)->fid.dir.dir = fsi->root_dir; SDFAT_I(inode)->fid.dir.flags = 0x01; @@ -4997,18 +4921,11 @@ static int sdfat_fill_super(struct super_block *sb, void *data, int silent) mutex_init(&sbi->s_vlock); sb->s_fs_info = sbi; - sb->s_flags |= SB_NODIRATIME; + sb->s_flags |= MS_NODIRATIME; sb->s_magic = SDFAT_SUPER_MAGIC; sb->s_op = &sdfat_sops; ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) - sb->s_time_gran = NSEC_PER_SEC; /* the same with default */ - sb->s_time_min = SDFAT_MIN_TIMESTAMP_SECS; - sb->s_time_max = SDFAT_MAX_TIMESTAMP_SECS; -#endif - err = parse_options(sb, data, silent, &debug, &sbi->options); if (err) { sdfat_log_msg(sb, KERN_ERR, "failed to parse options"); @@ -5155,13 +5072,11 @@ static int __init sdfat_init_inodecache(void) static void sdfat_destroy_inodecache(void) { -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); -#endif kmem_cache_destroy(sdfat_inode_cachep); } @@ -5310,13 +5225,6 @@ error: static void __exit exit_sdfat_fs(void) { -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) - /* - * Make sure all delayed rcu free inodes are flushed before we - * destroy cache. 
- */ - rcu_barrier(); -#endif sdfat_uevent_uninit(); sdfat_statistics_uninit(); diff --git a/fs/sdfat/sdfat.h b/fs/sdfat/sdfat.h index 8a18aa5b798e..60f7811c7b99 100644 --- a/fs/sdfat/sdfat.h +++ b/fs/sdfat/sdfat.h @@ -142,9 +142,6 @@ struct sdfat_sb_info { struct mutex s_vlock; /* volume lock */ int use_vmalloc; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) - struct rcu_head rcu; -#endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) int s_dirt; struct mutex s_lock; /* superblock lock */ @@ -218,30 +215,6 @@ typedef struct timespec64 sdfat_timespec_t; typedef struct timespec sdfat_timespec_t; #endif - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) - -#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) */ -/* - * sb->s_flags. Note that these mirror the equivalent MS_* flags where - * represented in both. - */ -#define SB_RDONLY 1 /* Mount read-only */ -#define SB_NODIRATIME 2048 /* Do not update directory access times */ -static inline bool sb_rdonly(const struct super_block *sb) -{ - return sb->s_flags & MS_RDONLY; -} -#endif - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0) - /* EMPTY */ -#else -static inline sdfat_timespec_t current_time(struct inode *inode) -{ - return CURRENT_TIME_SEC; -} -#endif /* * FIXME : needs on-disk-slot in-memory data */ @@ -414,14 +387,7 @@ extern void sdfat_time_fat2unix(struct sdfat_sb_info *sbi, sdfat_timespec_t *ts, DATE_TIME_T *tp); extern void sdfat_time_unix2fat(struct sdfat_sb_info *sbi, sdfat_timespec_t *ts, DATE_TIME_T *tp); -extern TIMESTAMP_T *tm_now(struct inode *inode, TIMESTAMP_T *tm); -static inline TIMESTAMP_T *tm_now_sb(struct super_block *sb, TIMESTAMP_T *tm) -{ - struct inode fake_inode; - - fake_inode.i_sb = sb; - return tm_now(&fake_inode, tm); -} +extern TIMESTAMP_T *tm_now(struct sdfat_sb_info *sbi, TIMESTAMP_T *tm); #ifdef CONFIG_SDFAT_DEBUG diff --git a/fs/sdfat/sdfat_fs.h b/fs/sdfat/sdfat_fs.h index 32d5f124b9f2..23b5cda2f58c 100644 --- a/fs/sdfat/sdfat_fs.h +++ b/fs/sdfat/sdfat_fs.h @@ 
-148,13 +148,6 @@ #define CS_PBR_SECTOR 1 #define CS_DEFAULT 2 -/* time min/max */ -/* Jan 1 GMT 00:00:00 1980 */ -#define SDFAT_MIN_TIMESTAMP_SECS 315532800LL -/* Dec 31 GMT 23:59:59 2107 */ -#define SDFAT_MAX_TIMESTAMP_SECS 4354819199LL - - /* * ioctl command */ @@ -189,7 +182,7 @@ /* On-Disk Type Definitions */ /*----------------------------------------------------------------------*/ -/* FAT12/16/32 BIOS parameter block (64 bytes) */ +/* FAT12/16 BIOS parameter block (64 bytes) */ typedef struct { __u8 jmp_boot[3]; __u8 oem_name[8]; @@ -207,28 +200,41 @@ typedef struct { __le32 num_hid_sectors; /* . */ __le32 num_huge_sectors; - union { - struct { - __u8 phy_drv_no; - __u8 state; /* used by WinNT for mount state */ - __u8 ext_signature; - __u8 vol_serial[4]; - __u8 vol_label[11]; - __u8 vol_type[8]; - __le16 nouse; - } f16; - - struct { - __le32 num_fat32_sectors; - __le16 ext_flags; - __u8 fs_version[2]; - __le32 root_cluster; /* . */ - __le16 fsinfo_sector; - __le16 backup_sector; - __le16 reserved[6]; /* . */ - } f32; - }; -} bpb_t; + __u8 phy_drv_no; + __u8 state; /* used by WindowsNT for mount state */ + __u8 ext_signature; + __u8 vol_serial[4]; + __u8 vol_label[11]; + __u8 vol_type[8]; + __le16 dummy; +} bpb16_t; + +/* FAT32 BIOS parameter block (64 bytes) */ +typedef struct { + __u8 jmp_boot[3]; + __u8 oem_name[8]; + + __u8 sect_size[2]; /* unaligned */ + __u8 sect_per_clus; + __le16 num_reserved; + __u8 num_fats; + __u8 num_root_entries[2]; /* unaligned */ + __u8 num_sectors[2]; /* unaligned */ + __u8 media_type; + __le16 num_fat_sectors; /* zero */ + __le16 sectors_in_track; + __le16 num_heads; + __le32 num_hid_sectors; /* . */ + __le32 num_huge_sectors; + + __le32 num_fat32_sectors; + __le16 ext_flags; + __u8 fs_version[2]; + __le32 root_cluster; /* . */ + __le16 fsinfo_sector; + __le16 backup_sector; + __le16 reserved[6]; /* . 
*/ +} bpb32_t; /* FAT32 EXTEND BIOS parameter block (32 bytes) */ typedef struct { @@ -270,12 +276,12 @@ typedef struct { /* FAT32 PBR (64 bytes) */ typedef struct { - bpb_t bpb; + bpb16_t bpb; } pbr16_t; /* FAT32 PBR[BPB+BSX] (96 bytes) */ typedef struct { - bpb_t bpb; + bpb32_t bpb; bsx32_t bsx; } pbr32_t; @@ -289,7 +295,8 @@ typedef struct { typedef struct { union { __u8 raw[64]; - bpb_t fat; + bpb16_t f16; + bpb32_t f32; bpb64_t f64; } bpb; union { diff --git a/fs/sdfat/version.h b/fs/sdfat/version.h index 92e99621143f..44e44e03d847 100644 --- a/fs/sdfat/version.h +++ b/fs/sdfat/version.h @@ -22,4 +22,4 @@ /* PURPOSE : sdFAT File Manager */ /* */ /************************************************************************/ -#define SDFAT_VERSION "2.4.5-lineage" +#define SDFAT_VERSION "2.3.0-lineage" diff --git a/fs/sdfat/xattr.c b/fs/sdfat/xattr.c index bd037358b644..40bb850711be 100644 --- a/fs/sdfat/xattr.c +++ b/fs/sdfat/xattr.c @@ -79,22 +79,12 @@ ssize_t __sdfat_getxattr(const char *name, void *value, size_t size) * FUNCTIONS WHICH HAS KERNEL VERSION DEPENDENCY *************************************************************************/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0) -#if defined(CONFIG_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) -static int sdfat_xattr_get(const struct xattr_handler *handler, - struct dentry *dentry, struct inode *inode, - const char *name, void *buffer, size_t size, - int flags) -{ - return __sdfat_getxattr(name, buffer, size); -} -#else static int sdfat_xattr_get(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size) { return __sdfat_getxattr(name, buffer, size); } -#endif static int sdfat_xattr_set(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, diff --git a/fs/seq_file.c b/fs/seq_file.c index 0004df800c88..6dc4296eed62 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -14,7 +14,6 @@ #include #include 
#include -#include #include #include @@ -29,9 +28,6 @@ static void *seq_buf_alloc(unsigned long size) void *buf; gfp_t gfp = GFP_KERNEL; - if (unlikely(size > MAX_RW_COUNT)) - return NULL; - /* * For high order allocations, use __GFP_NORETRY to avoid oom-killing - * it's better to fall back to vmalloc() than to kill things. For small @@ -683,11 +679,11 @@ EXPORT_SYMBOL(seq_puts); /* * A helper routine for putting decimal numbers without rich format of printf(). * only 'unsigned long long' is supported. - * This routine will put strlen(delimiter) + number into seq_file. + * This routine will put one byte delimiter + number into seq_file. * This routine is very quick when you show lots of numbers. * In usual cases, it will be better to use seq_printf(). It's easier to read. */ -void seq_put_decimal_ull(struct seq_file *m, const char *delimiter, +void seq_put_decimal_ull(struct seq_file *m, char delimiter, unsigned long long num) { int len; @@ -695,15 +691,8 @@ void seq_put_decimal_ull(struct seq_file *m, const char *delimiter, if (m->count + 2 >= m->size) /* we'll write 2 bytes at least */ goto overflow; - len = strlen(delimiter); - if (m->count + len >= m->size) - goto overflow; - - memcpy(m->buf + m->count, delimiter, len); - m->count += len; - - if (m->count + 1 >= m->size) - goto overflow; + if (delimiter) + m->buf[m->count++] = delimiter; if (num < 10) { m->buf[m->count++] = num + '0'; @@ -713,7 +702,6 @@ void seq_put_decimal_ull(struct seq_file *m, const char *delimiter, len = num_to_str(m->buf + m->count, m->size - m->count, num); if (!len) goto overflow; - m->count += len; return; @@ -722,42 +710,19 @@ overflow: } EXPORT_SYMBOL(seq_put_decimal_ull); -void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num) +void seq_put_decimal_ll(struct seq_file *m, char delimiter, long long num) { - int len; - - if (m->count + 3 >= m->size) /* we'll write 2 bytes at least */ - goto overflow; - - len = strlen(delimiter); - if (m->count + len >= 
m->size) - goto overflow; - - memcpy(m->buf + m->count, delimiter, len); - m->count += len; - - if (m->count + 2 >= m->size) - goto overflow; - if (num < 0) { - m->buf[m->count++] = '-'; + if (m->count + 3 >= m->size) { + seq_set_overflow(m); + return; + } + if (delimiter) + m->buf[m->count++] = delimiter; num = -num; + delimiter = '-'; } - - if (num < 10) { - m->buf[m->count++] = num + '0'; - return; - } - - len = num_to_str(m->buf + m->count, m->size - m->count, num); - if (!len) - goto overflow; - - m->count += len; - return; - -overflow: - seq_set_overflow(m); + seq_put_decimal_ull(m, delimiter, num); } EXPORT_SYMBOL(seq_put_decimal_ll); diff --git a/fs/signalfd.c b/fs/signalfd.c index 9c5fa0ab5e0f..270221fcef42 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -34,7 +34,17 @@ void signalfd_cleanup(struct sighand_struct *sighand) { - wake_up_pollfree(&sighand->signalfd_wqh); + wait_queue_head_t *wqh = &sighand->signalfd_wqh; + /* + * The lockless check can race with remove_wait_queue() in progress, + * but in this case its caller should run under rcu_read_lock() and + * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return. 
+ */ + if (likely(!waitqueue_active(wqh))) + return; + + /* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */ + wake_up_poll(wqh, POLLHUP | POLLFREE); } struct signalfd_ctx { diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c index 1d406a2094a5..8073b6532cf0 100644 --- a/fs/squashfs/export.c +++ b/fs/squashfs/export.c @@ -54,17 +54,12 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num) struct squashfs_sb_info *msblk = sb->s_fs_info; int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1); int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1); - u64 start; + u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]); __le64 ino; int err; TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num); - if (ino_num == 0 || (ino_num - 1) >= msblk->inodes) - return -EINVAL; - - start = le64_to_cpu(msblk->inode_lookup_table[blk]); - err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino)); if (err < 0) return err; @@ -129,10 +124,7 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, u64 lookup_table_start, u64 next_table, unsigned int inodes) { unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes); - unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes); - int n; __le64 *table; - u64 start, end; TRACE("In read_inode_lookup_table, length %d\n", length); @@ -142,41 +134,20 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, if (inodes == 0) return ERR_PTR(-EINVAL); - /* - * The computed size of the lookup table (length bytes) should exactly - * match the table start and end points + /* length bytes should not extend into the next table - this check + * also traps instances where lookup_table_start is incorrectly larger + * than the next table start */ - if (length != (next_table - lookup_table_start)) + if (lookup_table_start + length > next_table) return ERR_PTR(-EINVAL); table = squashfs_read_table(sb, lookup_table_start, length); - if (IS_ERR(table)) - return table; /* - * table0], 
table[1], ... table[indexes - 1] store the locations - * of the compressed inode lookup blocks. Each entry should be - * less than the next (i.e. table[0] < table[1]), and the difference - * between them should be SQUASHFS_METADATA_SIZE or less. - * table[indexes - 1] should be less than lookup_table_start, and - * again the difference should be SQUASHFS_METADATA_SIZE or less + * table[0] points to the first inode lookup table metadata block, + * this should be less than lookup_table_start */ - for (n = 0; n < (indexes - 1); n++) { - start = le64_to_cpu(table[n]); - end = le64_to_cpu(table[n + 1]); - - if (start >= end - || (end - start) > - (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { - kfree(table); - return ERR_PTR(-EINVAL); - } - } - - start = le64_to_cpu(table[indexes - 1]); - if (start >= lookup_table_start || - (lookup_table_start - start) > - (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { + if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) { kfree(table); return ERR_PTR(-EINVAL); } diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index 979da17cbbf3..1ec7bae2751d 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -224,11 +224,11 @@ failure: * If the skip factor is limited in this way then the file will use multiple * slots. 
*/ -static inline int calculate_skip(u64 blocks) +static inline int calculate_skip(int blocks) { - u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1) + int skip = blocks / ((SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES); - return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1); + return min(SQUASHFS_CACHED_BLKS - 1, skip + 1); } diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c index d2e15baab537..d38ea3dab951 100644 --- a/fs/squashfs/id.c +++ b/fs/squashfs/id.c @@ -48,15 +48,10 @@ int squashfs_get_id(struct super_block *sb, unsigned int index, struct squashfs_sb_info *msblk = sb->s_fs_info; int block = SQUASHFS_ID_BLOCK(index); int offset = SQUASHFS_ID_BLOCK_OFFSET(index); - u64 start_block; + u64 start_block = le64_to_cpu(msblk->id_table[block]); __le32 disk_id; int err; - if (index >= msblk->ids) - return -EINVAL; - - start_block = le64_to_cpu(msblk->id_table[block]); - err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset, sizeof(disk_id)); if (err < 0) @@ -74,10 +69,7 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb, u64 id_table_start, u64 next_table, unsigned short no_ids) { unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids); - unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids); - int n; __le64 *table; - u64 start, end; TRACE("In read_id_index_table, length %d\n", length); @@ -88,38 +80,20 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb, return ERR_PTR(-EINVAL); /* - * The computed size of the index table (length bytes) should exactly - * match the table start and end points + * length bytes should not extend into the next table - this check + * also traps instances where id_table_start is incorrectly larger + * than the next table start */ - if (length != (next_table - id_table_start)) + if (id_table_start + length > next_table) return ERR_PTR(-EINVAL); table = squashfs_read_table(sb, id_table_start, length); - if (IS_ERR(table)) - return table; /* - * table[0], table[1], ... 
table[indexes - 1] store the locations - * of the compressed id blocks. Each entry should be less than - * the next (i.e. table[0] < table[1]), and the difference between them - * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1] - * should be less than id_table_start, and again the difference - * should be SQUASHFS_METADATA_SIZE or less + * table[0] points to the first id lookup table metadata block, this + * should be less than id_table_start */ - for (n = 0; n < (indexes - 1); n++) { - start = le64_to_cpu(table[n]); - end = le64_to_cpu(table[n + 1]); - - if (start >= end || (end - start) > - (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { - kfree(table); - return ERR_PTR(-EINVAL); - } - } - - start = le64_to_cpu(table[indexes - 1]); - if (start >= id_table_start || (id_table_start - start) > - (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { + if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) { kfree(table); return ERR_PTR(-EINVAL); } diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h index 2fd1262cc1bd..e66486366f02 100644 --- a/fs/squashfs/squashfs_fs.h +++ b/fs/squashfs/squashfs_fs.h @@ -30,7 +30,6 @@ /* size of metadata (inode and directory) blocks */ #define SQUASHFS_METADATA_SIZE 8192 -#define SQUASHFS_BLOCK_OFFSET 2 /* default size of block device I/O */ #ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h index 5234c19a0eab..ef69c31947bf 100644 --- a/fs/squashfs/squashfs_fs_sb.h +++ b/fs/squashfs/squashfs_fs_sb.h @@ -77,6 +77,5 @@ struct squashfs_sb_info { unsigned int inodes; unsigned int fragments; int xattr_ids; - unsigned int ids; }; #endif diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 44500dcf1805..93aa3e23c845 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -177,7 +177,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) msblk->directory_table = le64_to_cpu(sblk->directory_table_start); 
msblk->inodes = le32_to_cpu(sblk->inodes); msblk->fragments = le32_to_cpu(sblk->fragments); - msblk->ids = le16_to_cpu(sblk->no_ids); flags = le16_to_cpu(sblk->flags); TRACE("Found valid superblock on %s\n", bdevname(sb->s_bdev, b)); @@ -189,7 +188,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) TRACE("Block size %d\n", msblk->block_size); TRACE("Number of inodes %d\n", msblk->inodes); TRACE("Number of fragments %d\n", msblk->fragments); - TRACE("Number of ids %d\n", msblk->ids); + TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids)); TRACE("sblk->inode_table_start %llx\n", msblk->inode_table); TRACE("sblk->directory_table_start %llx\n", msblk->directory_table); TRACE("sblk->fragment_table_start %llx\n", @@ -246,7 +245,8 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) allocate_id_index_table: /* Allocate and read id index table */ msblk->id_table = squashfs_read_id_index_table(sb, - le64_to_cpu(sblk->id_table_start), next_table, msblk->ids); + le64_to_cpu(sblk->id_table_start), next_table, + le16_to_cpu(sblk->no_ids)); if (IS_ERR(msblk->id_table)) { ERROR("unable to read id index table\n"); err = PTR_ERR(msblk->id_table); diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h index 30b3aaa08b62..c83f5d9ec125 100644 --- a/fs/squashfs/xattr.h +++ b/fs/squashfs/xattr.h @@ -30,16 +30,8 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *, static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start, u64 *xattr_table_start, int *xattr_ids) { - struct squashfs_xattr_id_table *id_table; - - id_table = squashfs_read_table(sb, start, sizeof(*id_table)); - if (IS_ERR(id_table)) - return (__le64 *) id_table; - - *xattr_table_start = le64_to_cpu(id_table->xattr_table_start); - kfree(id_table); - ERROR("Xattrs in filesystem, these will be ignored\n"); + *xattr_table_start = start; return ERR_PTR(-ENOTSUPP); } diff --git a/fs/squashfs/xattr_id.c 
b/fs/squashfs/xattr_id.c index 7f718d2bf357..c89607d690c4 100644 --- a/fs/squashfs/xattr_id.c +++ b/fs/squashfs/xattr_id.c @@ -44,15 +44,10 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index, struct squashfs_sb_info *msblk = sb->s_fs_info; int block = SQUASHFS_XATTR_BLOCK(index); int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index); - u64 start_block; + u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]); struct squashfs_xattr_id id; int err; - if (index >= msblk->xattr_ids) - return -EINVAL; - - start_block = le64_to_cpu(msblk->xattr_id_table[block]); - err = squashfs_read_metadata(sb, &id, &start_block, &offset, sizeof(id)); if (err < 0) @@ -68,17 +63,13 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index, /* * Read uncompressed xattr id lookup table indexes from disk into memory */ -__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start, +__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start, u64 *xattr_table_start, int *xattr_ids) { - struct squashfs_sb_info *msblk = sb->s_fs_info; - unsigned int len, indexes; + unsigned int len; struct squashfs_xattr_id_table *id_table; - __le64 *table; - u64 start, end; - int n; - id_table = squashfs_read_table(sb, table_start, sizeof(*id_table)); + id_table = squashfs_read_table(sb, start, sizeof(*id_table)); if (IS_ERR(id_table)) return (__le64 *) id_table; @@ -92,54 +83,13 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start, if (*xattr_ids == 0) return ERR_PTR(-EINVAL); - len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids); - indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids); - - /* - * The computed size of the index table (len bytes) should exactly - * match the table start and end points - */ - start = table_start + sizeof(*id_table); - end = msblk->bytes_used; - - if (len != (end - start)) + /* xattr_table should be less than start */ + if (*xattr_table_start >= start) return ERR_PTR(-EINVAL); - table = 
squashfs_read_table(sb, start, len); - if (IS_ERR(table)) - return table; - - /* table[0], table[1], ... table[indexes - 1] store the locations - * of the compressed xattr id blocks. Each entry should be less than - * the next (i.e. table[0] < table[1]), and the difference between them - * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1] - * should be less than table_start, and again the difference - * shouls be SQUASHFS_METADATA_SIZE or less. - * - * Finally xattr_table_start should be less than table[0]. - */ - for (n = 0; n < (indexes - 1); n++) { - start = le64_to_cpu(table[n]); - end = le64_to_cpu(table[n + 1]); - - if (start >= end || (end - start) > - (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { - kfree(table); - return ERR_PTR(-EINVAL); - } - } - - start = le64_to_cpu(table[indexes - 1]); - if (start >= table_start || (table_start - start) > - (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { - kfree(table); - return ERR_PTR(-EINVAL); - } + len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids); - if (*xattr_table_start >= le64_to_cpu(table[0])) { - kfree(table); - return ERR_PTR(-EINVAL); - } + TRACE("In read_xattr_index_table, length %d\n", len); - return table; + return squashfs_read_table(sb, start + sizeof(*id_table), len); } diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 300cdbdc8494..666986b95c5d 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -17,7 +17,6 @@ #include #include #include -#include #include "sysfs.h" #include "../kernfs/kernfs-internal.h" @@ -550,57 +549,3 @@ void sysfs_remove_bin_file(struct kobject *kobj, kernfs_remove_by_name(kobj->sd, attr->attr.name); } EXPORT_SYMBOL_GPL(sysfs_remove_bin_file); - -/** - * sysfs_emit - scnprintf equivalent, aware of PAGE_SIZE buffer. - * @buf: start of PAGE_SIZE buffer. - * @fmt: format - * @...: optional arguments to @format - * - * - * Returns number of characters written to @buf. - */ -int sysfs_emit(char *buf, const char *fmt, ...) 
-{ - va_list args; - int len; - - if (WARN(!buf || offset_in_page(buf), - "invalid sysfs_emit: buf:%p\n", buf)) - return 0; - - va_start(args, fmt); - len = vscnprintf(buf, PAGE_SIZE, fmt, args); - va_end(args); - - return len; -} -EXPORT_SYMBOL_GPL(sysfs_emit); - -/** - * sysfs_emit_at - scnprintf equivalent, aware of PAGE_SIZE buffer. - * @buf: start of PAGE_SIZE buffer. - * @at: offset in @buf to start write in bytes - * @at must be >= 0 && < PAGE_SIZE - * @fmt: format - * @...: optional arguments to @fmt - * - * - * Returns number of characters written starting at &@buf[@at]. - */ -int sysfs_emit_at(char *buf, int at, const char *fmt, ...) -{ - va_list args; - int len; - - if (WARN(!buf || offset_in_page(buf) || at < 0 || at >= PAGE_SIZE, - "invalid sysfs_emit_at: buf:%p at:%d\n", buf, at)) - return 0; - - va_start(args, fmt); - len = vscnprintf(buf + at, PAGE_SIZE - at, fmt, args); - va_end(args); - - return len; -} -EXPORT_SYMBOL_GPL(sysfs_emit_at); diff --git a/fs/timerfd.c b/fs/timerfd.c index ab8dd1538381..1327a02ec778 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c @@ -400,11 +400,6 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) clockid != CLOCK_BOOTTIME_ALARM)) return -EINVAL; - if (!capable(CAP_WAKE_ALARM) && - (clockid == CLOCK_REALTIME_ALARM || - clockid == CLOCK_BOOTTIME_ALARM)) - return -EPERM; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -449,11 +444,6 @@ static int do_timerfd_settime(int ufd, int flags, return ret; ctx = f.file->private_data; - if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) { - fdput(f); - return -EPERM; - } - timerfd_setup_cancel(ctx, flags); /* diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index 7c24260793b7..c66f2423e1f5 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -162,77 +162,6 @@ struct tracefs_fs_info { struct tracefs_mount_opts mount_opts; }; -static void change_gid(struct dentry *dentry, kgid_t gid) -{ - if (!dentry->d_inode) - return; - dentry->d_inode->i_gid = 
gid; -} - -/* - * Taken from d_walk, but without he need for handling renames. - * Nothing can be renamed while walking the list, as tracefs - * does not support renames. This is only called when mounting - * or remounting the file system, to set all the files to - * the given gid. - */ -static void set_gid(struct dentry *parent, kgid_t gid) -{ - struct dentry *this_parent; - struct list_head *next; - - this_parent = parent; - spin_lock(&this_parent->d_lock); - - change_gid(this_parent, gid); -repeat: - next = this_parent->d_subdirs.next; -resume: - while (next != &this_parent->d_subdirs) { - struct list_head *tmp = next; - struct dentry *dentry = list_entry(tmp, struct dentry, d_child); - next = tmp->next; - - spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); - - change_gid(dentry, gid); - - if (!list_empty(&dentry->d_subdirs)) { - spin_unlock(&this_parent->d_lock); - spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); - this_parent = dentry; - spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); - goto repeat; - } - spin_unlock(&dentry->d_lock); - } - /* - * All done at this level ... ascend and resume the search. 
- */ - rcu_read_lock(); -ascend: - if (this_parent != parent) { - struct dentry *child = this_parent; - this_parent = child->d_parent; - - spin_unlock(&child->d_lock); - spin_lock(&this_parent->d_lock); - - /* go into the first sibling still alive */ - do { - next = child->d_child.next; - if (next == &this_parent->d_subdirs) - goto ascend; - child = list_entry(next, struct dentry, d_child); - } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)); - rcu_read_unlock(); - goto resume; - } - rcu_read_unlock(); - spin_unlock(&this_parent->d_lock); - return; -} - static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) { substring_t args[MAX_OPT_ARGS]; @@ -291,9 +220,7 @@ static int tracefs_apply_options(struct super_block *sb) inode->i_mode |= opts->mode; inode->i_uid = opts->uid; - - /* Set all the group ids to the mount option */ - set_gid(sb->s_root, opts->gid); + inode->i_gid = opts->gid; return 0; } @@ -484,8 +411,6 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode, inode->i_mode = mode; inode->i_fop = fops ? fops : &tracefs_file_operations; inode->i_private = data; - inode->i_uid = d_inode(dentry->d_parent)->i_uid; - inode->i_gid = d_inode(dentry->d_parent)->i_gid; d_instantiate(dentry, inode); fsnotify_create(dentry->d_parent->d_inode, dentry); return end_creating(dentry); @@ -504,12 +429,9 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent, if (unlikely(!inode)) return failed_creating(dentry); - /* Do not set bits for OTH */ - inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP; + inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; inode->i_op = ops; inode->i_fop = &simple_dir_operations; - inode->i_uid = d_inode(dentry->d_parent)->i_uid; - inode->i_gid = d_inode(dentry->d_parent)->i_gid; /* directory inodes start off with i_nlink == 2 (for "." 
entry) */ inc_nlink(inode); diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 99caaae01cab..9213a9e046ae 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -331,7 +331,7 @@ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad) { uint32_t crc; - ubifs_assert(pad >= 0); + ubifs_assert(pad >= 0 && !(pad & 7)); if (pad >= UBIFS_PAD_NODE_SZ) { struct ubifs_ch *ch = buf; @@ -721,10 +721,6 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) * write-buffer. */ memcpy(wbuf->buf + wbuf->used, buf, len); - if (aligned_len > len) { - ubifs_assert(aligned_len - len < 8); - ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len); - } if (aligned_len == wbuf->avail) { dbg_io("flush jhead %s wbuf to LEB %d:%d", @@ -817,18 +813,13 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) } spin_lock(&wbuf->lock); - if (aligned_len) { + if (aligned_len) /* * And now we have what's left and what does not take whole * max. write unit, so write it to the write-buffer and we are * done. 
*/ memcpy(wbuf->buf, buf + written, len); - if (aligned_len > len) { - ubifs_assert(aligned_len - len < 8); - ubifs_pad(c, wbuf->buf + len, aligned_len - len); - } - } if (c->leb_size - wbuf->offs >= c->max_write_size) wbuf->size = c->max_write_size; diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 2b35d1dd665d..7968b7a5e787 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1695,6 +1695,7 @@ out: kthread_stop(c->bgt); c->bgt = NULL; } + free_wbufs(c); kfree(c->write_reserve_buf); c->write_reserve_buf = NULL; vfree(c->ileb_buf); diff --git a/fs/udf/inode.c b/fs/udf/inode.c index b709c51c9f9e..2c39c1c81196 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -260,6 +260,10 @@ int udf_expand_file_adinicb(struct inode *inode) char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); int err; + struct writeback_control udf_wbc = { + .sync_mode = WB_SYNC_NONE, + .nr_to_write = 1, + }; WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex)); if (!iinfo->i_lenAlloc) { @@ -303,10 +307,8 @@ int udf_expand_file_adinicb(struct inode *inode) iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; /* from now on we have normal address_space methods */ inode->i_data.a_ops = &udf_aops; - set_page_dirty(page); - unlock_page(page); up_write(&iinfo->i_data_sem); - err = filemap_fdatawrite(inode->i_mapping); + err = inode->i_data.a_ops->writepage(page, &udf_wbc); if (err) { /* Restore everything back so that we don't lose data... 
*/ lock_page(page); @@ -318,7 +320,6 @@ int udf_expand_file_adinicb(struct inode *inode) unlock_page(page); iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; inode->i_data.a_ops = &udf_adinicb_aops; - iinfo->i_lenAlloc = inode->i_size; up_write(&iinfo->i_data_sem); } page_cache_release(page); diff --git a/fs/udf/misc.c b/fs/udf/misc.c index 8c7f9ea251e5..71d1c25f360d 100644 --- a/fs/udf/misc.c +++ b/fs/udf/misc.c @@ -175,22 +175,13 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type, else offset = le32_to_cpu(eahd->appAttrLocation); - while (offset + sizeof(*gaf) < iinfo->i_lenEAttr) { - uint32_t attrLength; - + while (offset < iinfo->i_lenEAttr) { gaf = (struct genericFormat *)&ea[offset]; - attrLength = le32_to_cpu(gaf->attrLength); - - /* Detect undersized elements and buffer overflows */ - if ((attrLength < sizeof(*gaf)) || - (attrLength > (iinfo->i_lenEAttr - offset))) - break; - if (le32_to_cpu(gaf->attrType) == type && gaf->attrSubtype == subtype) return gaf; else - offset += attrLength; + offset += le32_to_cpu(gaf->attrLength); } } diff --git a/fs/udf/namei.c b/fs/udf/namei.c index af6fd442b9d8..c97b5a8d1e24 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -947,10 +947,6 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, iinfo->i_location.partitionReferenceNum, 0); epos.bh = udf_tgetblk(sb, block); - if (unlikely(!epos.bh)) { - err = -ENOMEM; - goto out_no_entry; - } lock_buffer(epos.bh); memset(epos.bh->b_data, 0x00, bsize); set_buffer_uptodate(epos.bh); diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 2e4f78d68b05..e4a4f82ea13f 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -729,8 +729,7 @@ xfs_ioc_space( flags |= XFS_PREALLOC_CLEAR; if (bf->l_start > XFS_ISIZE(ip)) { error = xfs_alloc_file_space(ip, XFS_ISIZE(ip), - bf->l_start - XFS_ISIZE(ip), - XFS_BMAPI_PREALLOC); + bf->l_start - XFS_ISIZE(ip), 0); if (error) goto out_unlock; } diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 
d70a004378d8..245268a0cdf0 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -770,7 +770,7 @@ xfs_setattr_size( ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL)); ASSERT(S_ISREG(ip->i_d.di_mode)); ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| - ATTR_MTIME_SET|ATTR_TIMES_SET)) == 0); + ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); oldsize = inode->i_size; newsize = iattr->ia_size; diff --git a/gen_headers_arm.bp b/gen_headers_arm.bp index 9cded5bbba09..9b6721d24b38 100644 --- a/gen_headers_arm.bp +++ b/gen_headers_arm.bp @@ -490,8 +490,6 @@ gen_headers_out_arm = [ "linux/qcota.h", "linux/qg-profile.h", "linux/qg.h", - "linux/qbg-profile.h", - "linux/qbg.h", "linux/qnx4_fs.h", "linux/qnxtypes.h", "linux/qrng.h", @@ -533,7 +531,6 @@ gen_headers_out_arm = [ "linux/shm.h", "linux/signal.h", "linux/signalfd.h", - "linux/slatecom_interface.h", "linux/smcinvoke.h", "linux/smiapp.h", "linux/snmp.h", diff --git a/gen_headers_arm64.bp b/gen_headers_arm64.bp index 655b8336c720..db09deada714 100644 --- a/gen_headers_arm64.bp +++ b/gen_headers_arm64.bp @@ -488,8 +488,6 @@ gen_headers_out_arm64 = [ "linux/qcota.h", "linux/qg-profile.h", "linux/qg.h", - "linux/qbg-profile.h", - "linux/qbg.h", "linux/qnx4_fs.h", "linux/qnxtypes.h", "linux/qrng.h", @@ -531,7 +529,6 @@ gen_headers_out_arm64 = [ "linux/shm.h", "linux/signal.h", "linux/signalfd.h", - "linux/slatecom_interface.h", "linux/smcinvoke.h", "linux/smiapp.h", "linux/snmp.h", diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 5f794f6ec6c7..9dbb739cafa0 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -165,13 +165,6 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) #define tlb_end_vma __tlb_end_vma #endif -static inline void tlb_flush_pmd_range(struct mmu_gather *tlb, - unsigned long address, unsigned long size) -{ - tlb->start = min(tlb->start, address); - tlb->end = max(tlb->end, address + size); -} - #ifndef 
__tlb_remove_tlb_entry #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) #endif diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 52ab4aa7f214..2a673e31b3dd 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -430,12 +430,8 @@ * during second ld run in second ld pass when generating System.map */ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ - *(.text.hot .text.hot.*) \ - *(.text .text.fixup) \ - *(.text.unlikely .text.unlikely.*) \ - *(.text.unknown .text.unknown.*) \ + *(.text.hot .text .text.fixup .text.unlikely) \ *(.ref.text) \ - *(.text.asan.* .text.tsan.*) \ MEM_KEEP(init.text) \ MEM_KEEP(exit.text) \ diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index b6a979fae948..90aa5cb7ea82 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -83,7 +83,13 @@ int ahash_register_instance(struct crypto_template *tmpl, struct ahash_instance *inst); void ahash_free_instance(struct crypto_instance *inst); -bool crypto_shash_alg_has_setkey(struct shash_alg *alg); +int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); + +static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) +{ + return alg->setkey != shash_no_setkey; +} bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); diff --git a/include/dt-bindings/clock/msm-clocks-8998.h b/include/dt-bindings/clock/msm-clocks-8998.h index 3d8e5663d723..67e47c46e09a 100644 --- a/include/dt-bindings/clock/msm-clocks-8998.h +++ b/include/dt-bindings/clock/msm-clocks-8998.h @@ -258,6 +258,7 @@ #define clk_hlos1_vote_lpass_core_smmu_clk 0x3aaa1743 #define clk_hlos1_vote_lpass_adsp_smmu_clk 0xc76f702f #define clk_gcc_mss_cfg_ahb_clk 0x111cde81 +#define clk_gcc_mss_q6_bimc_axi_clk 0x67544d62 #define clk_gcc_mss_mnoc_bimc_axi_clk 0xf665d03f #define clk_gpll0_out_msscc 0x7d794829 #define clk_gcc_mss_snoc_axi_clk 0x0e71de85 diff --git 
a/include/linux/acpi.h b/include/linux/acpi.h index 719fb8b320fd..0bd0a9ad5455 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -604,13 +604,6 @@ static inline int acpi_device_modalias(struct device *dev, return -ENODEV; } -static inline struct platform_device * -acpi_create_platform_device(struct acpi_device *adev, - struct property_entry *properties) -{ - return NULL; -} - static inline bool acpi_dma_supported(struct acpi_device *adev) { return false; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 970bef82da35..4bc6540d426b 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -527,13 +526,4 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi) (1 << WB_async_congested)); } -extern const char *bdi_unknown_name; - -static inline const char *bdi_dev_name(struct backing_dev_info *bdi) -{ - if (!bdi || !bdi->dev) - return bdi_unknown_name; - return dev_name(bdi->dev); -} - #endif /* _LINUX_BACKING_DEV_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1607d1a84da5..3e928b86966e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -838,19 +838,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev) return bdev->bd_disk->queue; /* this is never NULL */ } -/* - * The basic unit of block I/O is a sector. It is used in a number of contexts - * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9 - * bytes. Variables of type sector_t represent an offset or size that is a - * multiple of 512 bytes. Hence these two constants. 
- */ -#ifndef SECTOR_SHIFT -#define SECTOR_SHIFT 9 -#endif -#ifndef SECTOR_SIZE -#define SECTOR_SIZE (1 << SECTOR_SHIFT) -#endif - /* * blk_rq_pos() : the current sector * blk_rq_bytes() : bytes left in the entire request @@ -878,20 +865,19 @@ extern unsigned int blk_rq_err_bytes(const struct request *rq); static inline unsigned int blk_rq_sectors(const struct request *rq) { - return blk_rq_bytes(rq) >> SECTOR_SHIFT; + return blk_rq_bytes(rq) >> 9; } static inline unsigned int blk_rq_cur_sectors(const struct request *rq) { - return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; + return blk_rq_cur_bytes(rq) >> 9; } static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, unsigned int cmd_flags) { if (unlikely(cmd_flags & REQ_DISCARD)) - return min(q->limits.max_discard_sectors, - UINT_MAX >> SECTOR_SHIFT); + return min(q->limits.max_discard_sectors, UINT_MAX >> 9); if (unlikely(cmd_flags & REQ_WRITE_SAME)) return q->limits.max_write_same_sectors; @@ -1157,21 +1143,16 @@ extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, static inline int sb_issue_discard(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) { - return blkdev_issue_discard(sb->s_bdev, - block << (sb->s_blocksize_bits - - SECTOR_SHIFT), - nr_blocks << (sb->s_blocksize_bits - - SECTOR_SHIFT), + return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), + nr_blocks << (sb->s_blocksize_bits - 9), gfp_mask, flags); } static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask) { return blkdev_issue_zeroout(sb->s_bdev, - block << (sb->s_blocksize_bits - - SECTOR_SHIFT), - nr_blocks << (sb->s_blocksize_bits - - SECTOR_SHIFT), + block << (sb->s_blocksize_bits - 9), + nr_blocks << (sb->s_blocksize_bits - 9), gfp_mask, true); } @@ -1278,8 +1259,7 @@ static inline int queue_alignment_offset(struct request_queue *q) static inline int 
queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) { unsigned int granularity = max(lim->physical_block_size, lim->io_min); - unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT) - << SECTOR_SHIFT; + unsigned int alignment = sector_div(sector, granularity >> 9) << 9; return (granularity + lim->alignment_offset - alignment) % granularity; } @@ -1313,8 +1293,8 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector return 0; /* Why are these in bytes, not sectors? */ - alignment = lim->discard_alignment >> SECTOR_SHIFT; - granularity = lim->discard_granularity >> SECTOR_SHIFT; + alignment = lim->discard_alignment >> 9; + granularity = lim->discard_granularity >> 9; if (!granularity) return 0; @@ -1325,7 +1305,7 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector offset = (granularity + alignment - offset) % granularity; /* Turn it back into bytes, gaah */ - return offset << SECTOR_SHIFT; + return offset << 9; } static inline int bdev_discard_alignment(struct block_device *bdev) diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index 0e7350973e0e..1a2111c775ae 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -48,12 +48,8 @@ static inline void can_skb_reserve(struct sk_buff *skb) static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) { - /* If the socket has already been closed by user space, the - * refcount may already be 0 (and the socket will be freed - * after the last TX skb has been freed). So only increase - * socket refcount if the refcount is > 0. 
- */ - if (sk && atomic_inc_not_zero(&sk->sk_refcnt)) { + if (sk) { + sock_hold(sk); skb->destructor = sock_efree; skb->sk = sk; } diff --git a/include/linux/compat.h b/include/linux/compat.h index 24dd42910d7c..a76c9172b2eb 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -306,6 +306,8 @@ struct compat_kexec_segment; struct compat_mq_attr; struct compat_msgbuf; +extern void compat_exit_robust_list(struct task_struct *curr); + asmlinkage long compat_sys_set_robust_list(struct compat_robust_list_head __user *head, compat_size_t len); diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 6851c4214ac6..de179993e039 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -15,17 +15,3 @@ * with any version that can compile the kernel */ #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) - -/* - * Not all versions of clang implement the the type-generic versions - * of the builtin overflow checkers. Fortunately, clang implements - * __has_builtin allowing us to avoid awkward version - * checks. Unfortunately, we don't know which version of gcc clang - * pretends to be, so the macro may or may not be defined. - */ -#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW -#if __has_builtin(__builtin_mul_overflow) && \ - __has_builtin(__builtin_add_overflow) && \ - __has_builtin(__builtin_sub_overflow) -#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 -#endif diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index bce8a8f5b562..af8b4a879934 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -145,12 +145,6 @@ #if GCC_VERSION < 30200 # error Sorry, your compiler is too old - please upgrade it. 
-#elif defined(CONFIG_ARM64) && GCC_VERSION < 50100 && !defined(__clang__) -/* - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293 - * https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk - */ -# error Sorry, your version of GCC is too old - please use 5.1 or newer. #endif #if GCC_VERSION < 30300 @@ -321,7 +315,3 @@ * code */ #define uninitialized_var(x) x = x - -#if GCC_VERSION >= 50100 -#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 -#endif diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 8c9897b1b953..d4c71132d07f 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -43,7 +43,3 @@ #define __builtin_bswap16 _bswap16 #endif -/* - * icc defines __GNUC__, but does not implement the builtin overflow checkers. - */ -#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW diff --git a/include/linux/compiler.h b/include/linux/compiler.h index bc8077e5e688..7cabe0cc8665 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -208,8 +208,6 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); (typeof(ptr)) (__ptr + (off)); }) #endif -#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0) - #ifndef OPTIMIZER_HIDE_VAR #define OPTIMIZER_HIDE_VAR(var) barrier() #endif diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 3a7fd222845e..e329ee2667e1 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -29,7 +29,6 @@ struct vc_data { unsigned int vc_rows; unsigned int vc_size_row; /* Bytes per row */ unsigned int vc_scan_lines; /* # of scan lines */ - unsigned int vc_cell_height; /* CRTC character cell height */ unsigned long vc_origin; /* [!] Start of real screen */ unsigned long vc_scr_end; /* [!] End of real screen */ unsigned long vc_visible_origin; /* [!] 
Top of visible window */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 8e9d08dfbd18..9302d016b89f 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -123,9 +123,6 @@ struct cpufreq_policy { unsigned int up_transition_delay_us; unsigned int down_transition_delay_us; - /* Boost switch for tasks with p->in_iowait set */ - bool iowait_boost_enable; - /* Cached frequency lookup from cpufreq_driver_resolve_freq. */ unsigned int cached_target_freq; int cached_resolved_idx; diff --git a/include/linux/cred.h b/include/linux/cred.h index ee2b36cdb80d..d2db1da3036c 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -243,7 +243,7 @@ static inline struct cred *get_new_cred(struct cred *cred) * @cred: The credentials to reference * * Get a reference on the specified set of credentials. The caller must - * release the reference. If %NULL is passed, it is returned with no action. + * release the reference. * * This is used to deal with a committed set of credentials. Although the * pointer is const, this will temporarily discard the const and increment the @@ -254,8 +254,6 @@ static inline struct cred *get_new_cred(struct cred *cred) static inline const struct cred *get_cred(const struct cred *cred) { struct cred *nonconst_cred = (struct cred *) cred; - if (!cred) - return cred; validate_creds(cred); nonconst_cred->non_rcu = 0; return get_new_cred(nonconst_cred); @@ -266,7 +264,7 @@ static inline const struct cred *get_cred(const struct cred *cred) * @cred: The credentials to release * * Release a reference to a set of credentials, deleting them when the last ref - * is released. If %NULL is passed, nothing is done. + * is released. 
* * This takes a const pointer to a set of credentials because the credentials * on task_struct are attached by const pointers to prevent accidental @@ -276,11 +274,9 @@ static inline void put_cred(const struct cred *_cred) { struct cred *cred = (struct cred *) _cred; - if (cred) { - validate_creds(cred); - if (atomic_dec_and_test(&(cred)->usage)) - __put_cred(cred); - } + validate_creds(cred); + if (atomic_dec_and_test(&(cred)->usage)) + __put_cred(cred); } /** diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 71a205c5c125..b393ab66073a 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -556,6 +556,8 @@ extern struct ratelimit_state dm_ratelimit_state; #define DMEMIT(x...) sz += ((sz >= maxlen) ? \ 0 : scnprintf(result + sz, maxlen - sz, x)) +#define SECTOR_SHIFT 9 + /* * Definitions of return values from target end_io function. */ diff --git a/include/linux/device.h b/include/linux/device.h index 9bb803cf54fc..21722b8509ce 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -806,7 +806,6 @@ struct device { struct dev_pin_info *pins; #endif #ifdef CONFIG_GENERIC_MSI_IRQ - raw_spinlock_t msi_lock; struct list_head msi_list; #endif diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index a65dadad65bf..698d51a0eea3 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -55,7 +55,6 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse } #endif -#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64) /* * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out * extra segments containing the gate DSO contents. 
Dumping its @@ -70,26 +69,5 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset); extern int elf_core_write_extra_data(struct coredump_params *cprm); extern size_t elf_core_extra_data_size(void); -#else -static inline Elf_Half elf_core_extra_phdrs(void) -{ - return 0; -} - -static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) -{ - return 1; -} - -static inline int elf_core_write_extra_data(struct coredump_params *cprm) -{ - return 1; -} - -static inline size_t elf_core_extra_data_size(void) -{ - return 0; -} -#endif #endif /* _LINUX_ELFCORE_H */ diff --git a/include/linux/file.h b/include/linux/file.h index 67f0888abdde..f87d30882a24 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -12,7 +12,6 @@ struct file; extern void fput(struct file *); -extern void fput_many(struct file *, unsigned int); struct file_operations; struct vfsmount; @@ -41,7 +40,6 @@ static inline void fdput(struct fd fd) } extern struct file *fget(unsigned int fd); -extern struct file *fget_many(unsigned int fd, unsigned int refs); extern struct file *fget_raw(unsigned int fd); extern unsigned long __fdget(unsigned int fd); extern unsigned long __fdget_raw(unsigned int fd); diff --git a/include/linux/fs.h b/include/linux/fs.h index e7703b0a48f8..bad681859554 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -146,6 +146,9 @@ typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate); /* File is stream-like */ #define FMODE_STREAM ((__force fmode_t)0x200000) +/* File hasn't page cache and can't be mmaped, for stackable filesystem */ +#define FMODE_NONMAPPABLE ((__force fmode_t)0x400000) + /* File was opened by fanotify and shouldn't generate fanotify events */ #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) @@ -961,9 +964,7 @@ static inline struct file *get_file(struct file *f) atomic_long_inc(&f->f_count); return f; } -#define get_file_rcu_many(x, cnt) \ - atomic_long_add_unless(&(x)->f_count, (cnt), 
0) -#define get_file_rcu(x) get_file_rcu_many((x), 1) +#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count) #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) #define file_count(x) atomic_long_read(&(x)->f_count) @@ -1726,6 +1727,7 @@ struct file_operations { #ifndef CONFIG_MMU unsigned (*mmap_capabilities)(struct file *); #endif + struct file* (*get_lower_file)(struct file *f); }; struct inode_operations { diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index a85d7b71e329..312a4ef093e3 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -748,9 +748,7 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* for init task */ -#define INIT_FTRACE_GRAPH \ - .ret_stack = NULL, \ - .tracing_graph_pause = ATOMIC_INIT(0), +#define INIT_FTRACE_GRAPH .ret_stack = NULL, /* * Stack of return addresses for functions diff --git a/include/linux/futex.h b/include/linux/futex.h index 0f294ae63c78..c015fa91e7cc 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -1,8 +1,6 @@ #ifndef _LINUX_FUTEX_H #define _LINUX_FUTEX_H -#include - #include struct inode; @@ -13,6 +11,9 @@ union ktime; long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout, u32 __user *uaddr2, u32 val2, u32 val3); +extern int +handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); + /* * Futexes are matched on equal values of this key. * The key type depends on whether it's a shared or private mapping. 
@@ -55,34 +56,19 @@ union futex_key { #define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } } #ifdef CONFIG_FUTEX -enum { - FUTEX_STATE_OK, - FUTEX_STATE_EXITING, - FUTEX_STATE_DEAD, -}; - -static inline void futex_init_task(struct task_struct *tsk) -{ - tsk->robust_list = NULL; -#ifdef CONFIG_COMPAT - tsk->compat_robust_list = NULL; +extern void exit_robust_list(struct task_struct *curr); +extern void exit_pi_state_list(struct task_struct *curr); +#ifdef CONFIG_HAVE_FUTEX_CMPXCHG +#define futex_cmpxchg_enabled 1 +#else +extern int futex_cmpxchg_enabled; #endif - INIT_LIST_HEAD(&tsk->pi_state_list); - tsk->pi_state_cache = NULL; - tsk->futex_state = FUTEX_STATE_OK; - mutex_init(&tsk->futex_exit_mutex); -} - -void futex_exit_recursive(struct task_struct *tsk); -void futex_exit_release(struct task_struct *tsk); -void futex_exec_release(struct task_struct *tsk); - -long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, - u32 __user *uaddr2, u32 val2, u32 val3); #else -static inline void futex_init_task(struct task_struct *tsk) { } -static inline void futex_exit_recursive(struct task_struct *tsk) { } -static inline void futex_exit_release(struct task_struct *tsk) { } -static inline void futex_exec_release(struct task_struct *tsk) { } +static inline void exit_robust_list(struct task_struct *curr) +{ +} +static inline void exit_pi_state_list(struct task_struct *curr) +{ +} #endif #endif diff --git a/include/linux/hid.h b/include/linux/hid.h index 12e746ab52f0..acc04ad8ec9c 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -246,8 +246,6 @@ struct hid_item { #define HID_CP_SELECTION 0x000c0080 #define HID_CP_MEDIASELECTION 0x000c0087 #define HID_CP_SELECTDISC 0x000c00ba -#define HID_CP_VOLUMEUP 0x000c00e9 -#define HID_CP_VOLUMEDOWN 0x000c00ea #define HID_CP_PLAYBACKSPEED 0x000c00f1 #define HID_CP_PROXIMITY 0x000c0109 #define HID_CP_SPEAKERSYSTEM 0x000c0160 @@ -531,12 +529,10 @@ struct hid_device { /* device report descriptor */ * 
battery is non-NULL. */ struct power_supply *battery; - __s32 battery_capacity; __s32 battery_min; __s32 battery_max; __s32 battery_report_type; __s32 battery_report_id; - bool battery_reported; #endif unsigned int status; /* see STAT flags above */ @@ -764,22 +760,6 @@ struct hid_ll_driver { int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype); }; -extern struct hid_ll_driver i2c_hid_ll_driver; -extern struct hid_ll_driver hidp_hid_driver; -extern struct hid_ll_driver uhid_hid_driver; -extern struct hid_ll_driver usb_hid_driver; - -static inline bool hid_is_using_ll_driver(struct hid_device *hdev, - struct hid_ll_driver *driver) -{ - return hdev->ll_driver == driver; -} - -static inline bool hid_is_usb(struct hid_device *hdev) -{ - return hid_is_using_ll_driver(hdev, &usb_hid_driver); -} - #define PM_HINT_FULLON 1<<5 #define PM_HINT_NORMAL 1<<1 @@ -1145,7 +1125,8 @@ static inline void hid_hw_wait(struct hid_device *hdev) */ static inline u32 hid_report_len(struct hid_report *report) { - return DIV_ROUND_UP(report->size, 8) + (report->id > 0); + /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ + return ((report->size - 1) >> 3) + 1 + (report->id > 0); } int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 151ff61419dd..b71d142a93f3 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -92,7 +92,7 @@ void free_huge_page(struct page *page); void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve); extern struct mutex *hugetlb_fault_mutex_table; u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, - pgoff_t idx); + pgoff_t idx, unsigned long address); pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); @@ -504,9 +504,6 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm) { atomic_long_sub(l, &mm->hugetlb_usage); } - -void 
set_page_huge_active(struct page *page); - #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; #define alloc_huge_page(v, a, r) NULL diff --git a/include/linux/ide.h b/include/linux/ide.h index eb2ac48c99db..a633898f36ac 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -128,6 +128,7 @@ struct ide_io_ports { */ #define PARTN_BITS 6 /* number of minor dev bits for partitions */ #define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ +#define SECTOR_SIZE 512 /* * Timeouts for various operations: diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 450ea5e38564..99b7ed47fc11 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -638,16 +638,6 @@ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl) return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0; } -/** - * ieee80211_is_frag - check if a frame is a fragment - * @hdr: 802.11 header of the frame - */ -static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr) -{ - return ieee80211_has_morefrags(hdr->frame_control) || - hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG); -} - struct ieee80211s_hdr { u8 flags; u8 ttl; diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index cfcbc49f4ddf..a4ccc3122f93 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -70,14 +70,13 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan, if (likely(success)) { struct vlan_pcpu_stats *pcpu_stats; - pcpu_stats = get_cpu_ptr(vlan->pcpu_stats); + pcpu_stats = this_cpu_ptr(vlan->pcpu_stats); u64_stats_update_begin(&pcpu_stats->syncp); pcpu_stats->rx_packets++; pcpu_stats->rx_bytes += len; if (multicast) pcpu_stats->rx_multicast++; u64_stats_update_end(&pcpu_stats->syncp); - put_cpu_ptr(vlan->pcpu_stats); } else { this_cpu_inc(vlan->pcpu_stats->rx_errors); } diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 40429b818b45..dd676ba758ee 100644 --- a/include/linux/if_vlan.h +++ 
b/include/linux/if_vlan.h @@ -30,8 +30,6 @@ #define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ #define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */ -#define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */ - /* * struct vlan_hdr - vlan header * @h_vlan_TCI: priority and VLAN ID @@ -480,10 +478,10 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. */ -static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, +static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, int *depth) { - unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; + unsigned int vlan_depth = skb->mac_len; /* if type is 802.1Q/AD then the header should already be * present at mac_len - VLAN_HLEN (if mac_len > 0), or at @@ -498,12 +496,13 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, vlan_depth = ETH_HLEN; } do { - struct vlan_hdr vhdr, *vh; + struct vlan_hdr *vh; - vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr); - if (unlikely(!vh || !--parse_depth)) + if (unlikely(!pskb_may_pull(skb, + vlan_depth + VLAN_HLEN))) return 0; + vh = (struct vlan_hdr *)(skb->data + vlan_depth); type = vh->h_vlan_encapsulated_proto; vlan_depth += VLAN_HLEN; } while (type == htons(ETH_P_8021Q) || @@ -523,25 +522,11 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. 
*/ -static inline __be16 vlan_get_protocol(const struct sk_buff *skb) +static inline __be16 vlan_get_protocol(struct sk_buff *skb) { return __vlan_get_protocol(skb, skb->protocol, NULL); } -/* A getter for the SKB protocol field which will handle VLAN tags consistently - * whether VLAN acceleration is enabled or not. - */ -static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan) -{ - if (!skip_vlan) - /* VLAN acceleration strips the VLAN header from the skb and - * moves it to skb->vlan_proto - */ - return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol; - - return vlan_get_protocol(skb); -} - static inline void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr) { diff --git a/include/linux/input.h b/include/linux/input.h index b365d7c31e58..1e967694e9a5 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -529,7 +529,6 @@ int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code, int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file); int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file); -int input_ff_flush(struct input_dev *dev, struct file *file); int input_ff_create_memless(struct input_dev *dev, void *data, int (*play_effect)(struct input_dev *, void *, struct ff_effect *)); diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 188bd1768971..d86ac620f0aa 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -447,8 +447,6 @@ struct intel_iommu { struct device *iommu_dev; /* IOMMU-sysfs device */ int node; u32 flags; /* Software defined flags */ - - struct dmar_drhd_unit *drhd; }; static inline void __iommu_flush_cache( diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index cf1a1c126e89..1eee6bcfcf76 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -123,16 +123,6 @@ static inline struct ipc_namespace *get_ipc_ns(struct 
ipc_namespace *ns) return ns; } -static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns) -{ - if (ns) { - if (atomic_inc_not_zero(&ns->count)) - return ns; - } - - return NULL; -} - extern void put_ipc_ns(struct ipc_namespace *ns); #else static inline struct ipc_namespace *copy_ipcs(unsigned long flags, @@ -149,11 +139,6 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) return ns; } -static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns) -{ - return ns; -} - static inline void put_ipc_ns(struct ipc_namespace *ns) { } diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 006ef813959b..4db62045f01a 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -192,8 +192,6 @@ struct kretprobe { raw_spinlock_t lock; }; -#define KRETPROBE_MAX_DATA_SIZE 4096 - struct kretprobe_instance { struct hlist_node hlist; struct kretprobe *rp; diff --git a/include/linux/kref.h b/include/linux/kref.h index 171f68a43d1d..edbc8f653c10 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -25,8 +25,6 @@ struct kref { atomic_t refcount; }; -#define KREF_INIT(n) { .refcount = ATOMIC_INIT(n), } - /** * kref_init - initialize object. * @kref: object in question. diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 1e62865821d9..19291f86d16c 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -25,7 +25,6 @@ #include #include #include -#include #include #include @@ -953,15 +952,7 @@ __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) static inline unsigned long __gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) { - /* - * The index was checked originally in search_memslots. To avoid - * that a malicious guest builds a Spectre gadget out of e.g. page - * table walks, do not let the processor speculate loads outside - * the guest's registered memslots. 
- */ - unsigned long offset = gfn - slot->base_gfn; - offset = array_index_nospec(offset, slot->npages); - return slot->userspace_addr + offset * PAGE_SIZE; + return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; } static inline int memslot_id(struct kvm *kvm, gfn_t gfn) diff --git a/include/linux/libata.h b/include/linux/libata.h index b8c06ef83331..ec49344f7555 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -406,7 +406,7 @@ enum { /* This should match the actual table size of * ata_eh_cmd_timeout_table in libata-eh.c. */ - ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7, + ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6, /* Horkage types. May be set by libata or controller on drives (some horkage may be drive/controller pair dependent */ @@ -437,7 +437,6 @@ enum { ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ - ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 8a9bffb2dac4..cf4832db2b29 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1121,22 +1121,22 @@ * * @binder_set_context_mgr * Check whether @mgr is allowed to be the binder context manager. - * @mgr contains the struct cred for the current binder process. + * @mgr contains the task_struct for the task being registered. * Return 0 if permission is granted. * @binder_transaction * Check whether @from is allowed to invoke a binder transaction call * to @to. - * @from contains the struct cred for the sending process. - * @to contains the struct cred for the receiving process. - * @binder_transfer_binder: + * @from contains the task_struct for the sending task. + * @to contains the task_struct for the receiving task. 
+ * @binder_transfer_binder * Check whether @from is allowed to transfer a binder reference to @to. - * @from contains the struct cred for the sending process. - * @to contains the struct cred for the receiving process. - * @binder_transfer_file: + * @from contains the task_struct for the sending task. + * @to contains the task_struct for the receiving task. + * @binder_transfer_file * Check whether @from is allowed to transfer @file to @to. - * @from contains the struct cred for the sending process. + * @from contains the task_struct for the sending task. * @file contains the struct file being transferred. - * @to contains the struct cred for the receiving process. + * @to contains the task_struct for the receiving task. * * @ptrace_access_check: * Check permission before allowing the current process to trace the @@ -1301,13 +1301,13 @@ */ union security_list_options { - int (*binder_set_context_mgr)(const struct cred *mgr); - int (*binder_transaction)(const struct cred *from, - const struct cred *to); - int (*binder_transfer_binder)(const struct cred *from, - const struct cred *to); - int (*binder_transfer_file)(const struct cred *from, - const struct cred *to, + int (*binder_set_context_mgr)(struct task_struct *mgr); + int (*binder_transaction)(struct task_struct *from, + struct task_struct *to); + int (*binder_transfer_binder)(struct task_struct *from, + struct task_struct *to); + int (*binder_transfer_file)(struct task_struct *from, + struct task_struct *to, struct file *file); int (*ptrace_access_check)(struct task_struct *child, diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h index 821a3b9bc16e..67703f23e7ba 100644 --- a/include/linux/mfd/abx500/ux500_chargalg.h +++ b/include/linux/mfd/abx500/ux500_chargalg.h @@ -15,7 +15,7 @@ * - POWER_SUPPLY_TYPE_USB, * because only them store as drv_data pointer to struct ux500_charger. 
*/ -#define psy_to_ux500_charger(x) power_supply_get_drvdata(x) +#define psy_to_ux500_charger(x) power_supply_get_drvdata(psy) /* Forward declaration */ struct ux500_charger; diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 93c14e9df630..3ab3cede28ea 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -50,11 +50,9 @@ enum { EC_MSG_TX_TRAILER_BYTES, EC_MSG_RX_PROTO_BYTES = 3, - /* Max length of messages for proto 2*/ - EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + + /* Max length of messages */ + EC_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE + EC_MSG_TX_PROTO_BYTES, - - EC_MAX_MSG_BYTES = 64 * 1024, }; /* diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h index 52d53d134f72..1b63fc2f42d1 100644 --- a/include/linux/mfd/rt5033-private.h +++ b/include/linux/mfd/rt5033-private.h @@ -203,13 +203,13 @@ enum rt5033_reg { #define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U #define RT5033_REGULATOR_BUCK_VOLTAGE_MAX 3000000U #define RT5033_REGULATOR_BUCK_VOLTAGE_STEP 100000U -#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 21 +#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 32 /* RT5033 regulator LDO output voltage uV */ #define RT5033_REGULATOR_LDO_VOLTAGE_MIN 1200000U #define RT5033_REGULATOR_LDO_VOLTAGE_MAX 3000000U #define RT5033_REGULATOR_LDO_VOLTAGE_STEP 100000U -#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 19 +#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 32 /* RT5033 regulator SAFE LDO output voltage uV */ #define RT5033_REGULATOR_SAFE_LDO_VOLTAGE 4900000U diff --git a/include/linux/msi.h b/include/linux/msi.h index 037f47fe76e6..d0d50cf00b4d 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -128,7 +128,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); -void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 
flag); +u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); void pci_msi_mask_irq(struct irq_data *data); void pci_msi_unmask_irq(struct irq_data *data); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e080ad78e2ba..5faf056cfd9b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2055,7 +2055,6 @@ struct packet_type { struct net_device *); bool (*id_match)(struct packet_type *ptype, struct sock *sk); - struct net *af_packet_net; void *af_packet_priv; struct list_head list; }; @@ -3433,7 +3432,6 @@ static inline void netif_tx_disable(struct net_device *dev) local_bh_disable(); cpu = smp_processor_id(); - spin_lock(&dev->tx_global_lock); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); @@ -3441,7 +3439,6 @@ static inline void netif_tx_disable(struct net_device *dev) netif_tx_stop_queue(txq); __netif_tx_unlock(txq); } - spin_unlock(&dev->tx_global_lock); local_bh_enable(); } diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 417e3a53dab8..6923e4049de3 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -135,7 +135,6 @@ struct xt_match { const char *table; unsigned int matchsize; - unsigned int usersize; #ifdef CONFIG_COMPAT unsigned int compatsize; #endif @@ -176,7 +175,6 @@ struct xt_target { const char *table; unsigned int targetsize; - unsigned int usersize; #ifdef CONFIG_COMPAT unsigned int compatsize; #endif @@ -256,13 +254,6 @@ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); -int xt_match_to_user(const struct xt_entry_match *m, - struct xt_entry_match __user *u); -int xt_target_to_user(const struct xt_entry_target *t, - struct xt_entry_target __user *u); -int xt_data_to_user(void __user *dst, const void *src, - int usersize, int size); - void 
*xt_copy_counters_from_user(const void __user *user, unsigned int len, struct xt_counters_info *info, bool compat); @@ -336,7 +327,7 @@ static inline unsigned int xt_write_recseq_begin(void) * since addend is most likely 1 */ __this_cpu_add(xt_recseq.sequence, addend); - smp_mb(); + smp_wmb(); return addend; } diff --git a/include/linux/of.h b/include/linux/of.h index 31964ddc2442..2772f027f88f 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -941,7 +941,6 @@ static inline int of_get_available_child_count(const struct device_node *np) #define _OF_DECLARE(table, name, compat, fn, fn_type) \ static const struct of_device_id __of_table_##name \ __used __section(__##table##_of_table) \ - __aligned(__alignof__(struct of_device_id)) \ = { .compatible = compat, \ .data = (fn == (fn_type)NULL) ? fn : fn } #else diff --git a/include/linux/pci.h b/include/linux/pci.h index c871b19cc915..5f37614f2451 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1442,9 +1442,8 @@ static inline int pci_set_dma_seg_boundary(struct pci_dev *dev, { return -EIO; } static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY; } -static inline int __must_check __pci_register_driver(struct pci_driver *drv, - struct module *owner, - const char *mod_name) +static inline int __pci_register_driver(struct pci_driver *drv, + struct module *owner) { return 0; } static inline int pci_register_driver(struct pci_driver *drv) { return 0; } diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h index 890f53881fad..522757ac9cd4 100644 --- a/include/linux/power/max17042_battery.h +++ b/include/linux/power/max17042_battery.h @@ -75,7 +75,7 @@ enum max17042_register { MAX17042_RelaxCFG = 0x2A, MAX17042_MiscCFG = 0x2B, MAX17042_TGAIN = 0x2C, - MAX17042_TOFF = 0x2D, + MAx17042_TOFF = 0x2D, MAX17042_CGAIN = 0x2E, MAX17042_COFF = 0x2F, diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 
ba1d810ae524..9653eec2c554 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -272,7 +272,6 @@ enum power_supply_property { POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION, POWER_SUPPLY_PROP_ALLOW_HVDCP3, POWER_SUPPLY_PROP_MAX_PULSE_ALLOWED, - POWER_SUPPLY_PROP_FG_RESET_CLOCK, POWER_SUPPLY_PROP_SKIN_TEMP, POWER_SUPPLY_PROP_SMART_CHARGING_ACTIVATION, POWER_SUPPLY_PROP_SMART_CHARGING_INTERRUPTION, diff --git a/include/linux/prandom.h b/include/linux/prandom.h index e20339c78a84..cc1e71334e53 100644 --- a/include/linux/prandom.h +++ b/include/linux/prandom.h @@ -93,7 +93,7 @@ static inline u32 __seed(u32 x, u32 m) */ static inline void prandom_seed_state(struct rnd_state *state, u64 seed) { - u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL; + u32 i = (seed >> 32) ^ (seed << 10) ^ seed; state->s1 = __seed(i, 2U); state->s2 = __seed(i, 8U); diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 3072e9c93ae6..0a93e9d1708e 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -880,7 +880,9 @@ static __always_inline void rcu_read_lock(void) * Unfortunately, this function acquires the scheduler's runqueue and * priority-inheritance spinlocks. This means that deadlock could result * if the caller of rcu_read_unlock() already holds one of these locks or - * any lock that is ever acquired while holding them. + * any lock that is ever acquired while holding them; or any lock which + * can be taken from interrupt context because rcu_boost()->rt_mutex_lock() + * does not disable irqs while taking ->wait_lock. * * That said, RCU readers are never priority boosted unless they were * preempted. 
Therefore, one way to avoid deadlock is to make sure diff --git a/include/linux/sched.h b/include/linux/sched.h index f2261d3e33b7..fa4699ef9cd5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -184,6 +184,28 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); extern u64 nr_running_integral(unsigned int cpu); #endif +extern void sched_update_nr_prod(int cpu, long delta, bool inc); +extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg, + unsigned int *max_nr, + unsigned int *big_max_nr); +extern u64 sched_get_cpu_last_busy_time(int cpu); + +#ifdef CONFIG_SMP +extern u32 sched_get_wake_up_idle(struct task_struct *p); +extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle); +#else +static inline u32 sched_get_wake_up_idle(struct task_struct *p) +{ + return 0; +} + +static inline int sched_set_wake_up_idle(struct task_struct *p, + int wake_up_idle) +{ + return 0; +} +#endif /* CONFIG_SMP */ + extern void calc_global_load(unsigned long ticks); #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) @@ -325,6 +347,8 @@ extern char ___assert_task_state[1 - 2*!!( /* Task command name length */ #define TASK_COMM_LEN 16 +extern const char *sched_window_reset_reasons[]; + enum task_event { PUT_PREV_TASK = 0, PICK_NEXT_TASK = 1, @@ -334,6 +358,12 @@ enum task_event { IRQ_UPDATE = 5, }; +/* Note: this need to be in sync with migrate_type_names array */ +enum migrate_types { + GROUP_TO_RQ, + RQ_TO_GROUP, +}; + #include /* @@ -361,6 +391,41 @@ extern cpumask_var_t cpu_isolated_map; extern int runqueue_is_locked(int cpu); +#ifdef CONFIG_HOTPLUG_CPU +extern int sched_isolate_count(const cpumask_t *mask, bool include_offline); +extern int sched_isolate_cpu(int cpu); +extern int sched_unisolate_cpu(int cpu); +extern int sched_unisolate_cpu_unlocked(int cpu); +#else +static inline int sched_isolate_count(const cpumask_t *mask, + bool include_offline) +{ + cpumask_t count_mask; + + if 
(include_offline) + cpumask_andnot(&count_mask, mask, cpu_online_mask); + else + return 0; + + return cpumask_weight(&count_mask); +} + +static inline int sched_isolate_cpu(int cpu) +{ + return 0; +} + +static inline int sched_unisolate_cpu(int cpu) +{ + return 0; +} + +static inline int sched_unisolate_cpu_unlocked(int cpu) +{ + return 0; +} +#endif + #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) extern void nohz_balance_enter_idle(int cpu); extern void set_cpu_sd_state_idle(void); @@ -1388,8 +1453,8 @@ struct sched_statistics { }; #endif -#ifdef CONFIG_SCHED_WALT #define RAVG_HIST_SIZE_MAX 5 +#define NUM_BUSY_BUCKETS 10 /* ravg represents frequency scaled cpu-demand of tasks */ struct ravg { @@ -1409,19 +1474,31 @@ struct ravg { * sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency * demand for tasks. * - * 'curr_window' represents task's contribution to cpu busy time - * statistics (rq->curr_runnable_sum) in current window + * 'curr_window_cpu' represents task's contribution to cpu busy time on + * various CPUs in the current window + * + * 'prev_window_cpu' represents task's contribution to cpu busy time on + * various CPUs in the previous window + * + * 'curr_window' represents the sum of all entries in curr_window_cpu + * + * 'prev_window' represents the sum of all entries in prev_window_cpu * - * 'prev_window' represents task's contribution to cpu busy time - * statistics (rq->prev_runnable_sum) in previous window + * 'pred_demand' represents task's current predicted cpu busy time + * + * 'busy_buckets' groups historical busy time into different buckets + * used for prediction */ u64 mark_start; u32 sum, demand; u32 sum_history[RAVG_HIST_SIZE_MAX]; + u32 *curr_window_cpu, *prev_window_cpu; u32 curr_window, prev_window; + u64 curr_burst, avg_burst, avg_sleep_time; u16 active_windows; + u32 pred_demand; + u8 busy_buckets[NUM_BUSY_BUCKETS]; }; -#endif struct sched_entity { struct load_weight load; /* for load-balancing */ @@ -1597,13 
+1674,19 @@ struct task_struct { const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; -#ifdef CONFIG_SCHED_WALT +#ifdef CONFIG_SCHED_HMP struct ravg ravg; /* * 'init_load_pct' represents the initial task load assigned to children * of this task */ u32 init_load_pct; + u64 last_wake_ts; + u64 last_switch_out_ts; + u64 last_cpu_selected_ts; + struct related_thread_group *grp; + struct list_head grp_list; + u64 cpu_cycles; u64 last_sleep_ts; #endif #ifdef CONFIG_CGROUP_SCHED @@ -1896,8 +1979,6 @@ struct task_struct { #endif struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; - struct mutex futex_exit_mutex; - unsigned int futex_state; #endif #ifdef CONFIG_PERF_EVENTS struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; @@ -2302,7 +2383,9 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, /* * Per process flags */ +#define PF_WAKE_UP_IDLE 0x00000002 /* try to wake up on an idle CPU */ #define PF_EXITING 0x00000004 /* getting shut down */ +#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ @@ -2486,10 +2569,6 @@ extern void do_set_cpus_allowed(struct task_struct *p, extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); extern bool cpupri_check_rt(void); -static inline void set_wake_up_idle(bool enabled) -{ - /* do nothing for now */ -} #else static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) @@ -2508,8 +2587,96 @@ static inline bool cpupri_check_rt(void) } #endif -extern u32 sched_get_wake_up_idle(struct task_struct *p); -extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle); +struct sched_load { + unsigned long prev_load; + unsigned long new_task_load; + unsigned long predicted_load; +}; 
+ +struct cpu_cycle_counter_cb { + u64 (*get_cpu_cycle_counter)(int cpu); +}; + +#define MAX_NUM_CGROUP_COLOC_ID 20 + +#ifdef CONFIG_SCHED_HMP +extern void free_task_load_ptrs(struct task_struct *p); +extern int sched_set_window(u64 window_start, unsigned int window_size); +extern unsigned long sched_get_busy(int cpu); +extern void sched_get_cpus_busy(struct sched_load *busy, + const struct cpumask *query_cpus); +extern void sched_set_io_is_busy(int val); +extern int sched_set_boost(int enable); +extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct); +extern u32 sched_get_init_task_load(struct task_struct *p); +extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost); +extern unsigned int sched_get_static_cpu_pwr_cost(int cpu); +extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost); +extern unsigned int sched_get_static_cluster_pwr_cost(int cpu); +extern int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle); +extern unsigned int sched_get_cluster_wake_idle(int cpu); +extern int sched_update_freq_max_load(const cpumask_t *cpumask); +extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus, + u32 fmin, u32 fmax); +extern void sched_set_cpu_cstate(int cpu, int cstate, + int wakeup_energy, int wakeup_latency); +extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate, + int wakeup_energy, int wakeup_latency); +extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb); +extern u64 sched_ktime_clock(void); +extern int sched_set_group_id(struct task_struct *p, unsigned int group_id); +extern unsigned int sched_get_group_id(struct task_struct *p); + +#else /* CONFIG_SCHED_HMP */ +static inline void free_task_load_ptrs(struct task_struct *p) { } + +static inline u64 sched_ktime_clock(void) +{ + return 0; +} + +static inline int +register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb) +{ + return 0; +} + +static inline int sched_set_window(u64 window_start, 
unsigned int window_size) +{ + return -EINVAL; +} +static inline unsigned long sched_get_busy(int cpu) +{ + return 0; +} +static inline void sched_get_cpus_busy(struct sched_load *busy, + const struct cpumask *query_cpus) {}; + +static inline void sched_set_io_is_busy(int val) {}; + +static inline int sched_set_boost(int enable) +{ + return -EINVAL; +} + +static inline int sched_update_freq_max_load(const cpumask_t *cpumask) +{ + return 0; +} + +static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus, + u32 fmin, u32 fmax) { } + +static inline void +sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency) +{ +} + +static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, + int dstate, int wakeup_energy, int wakeup_latency) +{ +} +#endif /* CONFIG_SCHED_HMP */ #ifdef CONFIG_NO_HZ_COMMON void calc_load_enter_idle(void); @@ -2519,6 +2686,14 @@ static inline void calc_load_enter_idle(void) { } static inline void calc_load_exit_idle(void) { } #endif /* CONFIG_NO_HZ_COMMON */ +static inline void set_wake_up_idle(bool enabled) +{ + if (enabled) + current->flags |= PF_WAKE_UP_IDLE; + else + current->flags &= ~PF_WAKE_UP_IDLE; +} + /* * Do not use outside of architecture code which knows its limitations. 
* @@ -2584,7 +2759,7 @@ extern unsigned long long task_sched_runtime(struct task_struct *task); /* sched_exec is called by processes performing an exec */ -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) extern void sched_exec(void); #else #define sched_exec() {} @@ -2720,6 +2895,7 @@ extern void xtime_update(unsigned long ticks); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); +extern int wake_up_process_no_notif(struct task_struct *tsk); extern void wake_up_new_task(struct task_struct *tsk); #ifdef CONFIG_SMP extern void kick_process(struct task_struct *tsk); @@ -2728,6 +2904,11 @@ extern void wake_up_new_task(struct task_struct *tsk); #endif extern int sched_fork(unsigned long clone_flags, struct task_struct *p); extern void sched_dead(struct task_struct *p); +#ifdef CONFIG_SCHED_HMP +extern void sched_exit(struct task_struct *p); +#else +static inline void sched_exit(struct task_struct *p) { } +#endif extern void proc_caches_init(void); extern void flush_signals(struct task_struct *); @@ -2869,10 +3050,8 @@ extern struct mm_struct *get_task_mm(struct task_struct *task); * succeeds. */ extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); -/* Remove the current tasks stale references to the old mm_struct on exit() */ -extern void exit_mm_release(struct task_struct *, struct mm_struct *); -/* Remove the current tasks stale references to the old mm_struct on exec() */ -extern void exec_mm_release(struct task_struct *, struct mm_struct *); +/* Remove the current tasks stale references to the old mm_struct */ +extern void mm_release(struct task_struct *, struct mm_struct *); #ifdef CONFIG_HAVE_COPY_THREAD_TLS extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, @@ -3020,7 +3199,7 @@ static inline int thread_group_empty(struct task_struct *p) * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring * subscriptions and synchronises with wait4(). 
Also used in procfs. Also * pins the final release of task.io_context. Also protects ->cpuset and - * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist. + * ->cgroup.subsys[]. And ->vfork_done. * * Nests both inside and outside of read_lock(&tasklist_lock). * It must not be nested with write_lock_irq(&tasklist_lock), @@ -3446,6 +3625,15 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) #endif /* CONFIG_SMP */ +extern struct atomic_notifier_head migration_notifier_head; +struct migration_notify_data { + int src_cpu; + int dest_cpu; + int load; +}; + +extern struct atomic_notifier_head load_alert_notifier_head; + extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); @@ -3539,6 +3727,7 @@ static inline unsigned long rlimit_max(unsigned int limit) #define SCHED_CPUFREQ_RT (1U << 0) #define SCHED_CPUFREQ_DL (1U << 1) #define SCHED_CPUFREQ_IOWAIT (1U << 2) +#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3) #ifdef CONFIG_CPU_FREQ struct update_util_data { diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index f44dc6c8b9e5..1e1fcb8791a7 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -41,12 +41,49 @@ extern unsigned int sysctl_sched_wakeup_granularity; extern unsigned int sysctl_sched_child_runs_first; extern unsigned int sysctl_sched_sync_hint_enable; extern unsigned int sysctl_sched_cstate_aware; -#ifdef CONFIG_SCHED_WALT -extern unsigned int sysctl_sched_use_walt_cpu_util; -extern unsigned int sysctl_sched_use_walt_task_util; -extern unsigned int sysctl_sched_walt_init_task_load_pct; -extern unsigned int sysctl_sched_walt_cpu_high_irqload; -#endif + +#ifdef CONFIG_SCHED_HMP + +enum freq_reporting_policy { + FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK, + FREQ_REPORT_CPU_LOAD, + FREQ_REPORT_TOP_TASK, + FREQ_REPORT_INVALID_POLICY +}; + +extern int sysctl_sched_freq_inc_notify; +extern int 
sysctl_sched_freq_dec_notify; +extern unsigned int sysctl_sched_freq_reporting_policy; +extern unsigned int sysctl_sched_window_stats_policy; +extern unsigned int sysctl_sched_ravg_hist_size; +extern unsigned int sysctl_sched_cpu_high_irqload; +extern unsigned int sysctl_sched_init_task_load_pct; +extern __read_mostly unsigned int sysctl_sched_spill_nr_run; +extern unsigned int sysctl_sched_spill_load_pct; +extern unsigned int sysctl_sched_upmigrate_pct; +extern unsigned int sysctl_sched_downmigrate_pct; +extern unsigned int sysctl_sched_group_upmigrate_pct; +extern unsigned int sysctl_sched_group_downmigrate_pct; +extern unsigned int sysctl_early_detection_duration; +extern unsigned int sysctl_sched_boost; +extern unsigned int sysctl_sched_small_wakee_task_load_pct; +extern unsigned int sysctl_sched_big_waker_task_load_pct; +extern unsigned int sysctl_sched_select_prev_cpu_us; +extern unsigned int sysctl_sched_restrict_cluster_spill; +extern unsigned int sysctl_sched_new_task_windows; +extern unsigned int sysctl_sched_pred_alert_freq; +extern unsigned int sysctl_sched_freq_aggregate; +extern unsigned int sysctl_sched_enable_thread_grouping; +extern unsigned int sysctl_sched_freq_aggregate_threshold_pct; +extern unsigned int sysctl_sched_prefer_sync_wakee_to_waker; +extern unsigned int sysctl_sched_short_burst; +extern unsigned int sysctl_sched_short_sleep; + +#else /* CONFIG_SCHED_HMP */ + +#define sysctl_sched_enable_hmp_task_placement 0 + +#endif /* CONFIG_SCHED_HMP */ #if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_IRQSOFF_TRACER) extern unsigned int sysctl_preemptoff_tracing_threshold_ns; @@ -78,6 +115,18 @@ int sched_proc_update_handler(struct ctl_table *table, int write, loff_t *ppos); #endif +extern int sched_migrate_notify_proc_handler(struct ctl_table *table, + int write, void __user *buffer, size_t *lenp, loff_t *ppos); + +extern int sched_hmp_proc_update_handler(struct ctl_table *table, + int write, void __user *buffer, size_t *lenp, loff_t *ppos); 
+ +extern int sched_boost_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); + +extern int sched_window_update_handler(struct ctl_table *table, + int write, void __user *buffer, size_t *lenp, loff_t *ppos); + /* * control realtime throttling: * diff --git a/include/linux/security.h b/include/linux/security.h index 96eacf534163..7caf520e6233 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -183,13 +183,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) extern int security_init(void); /* Security operations */ -int security_binder_set_context_mgr(const struct cred *mgr); -int security_binder_transaction(const struct cred *from, - const struct cred *to); -int security_binder_transfer_binder(const struct cred *from, - const struct cred *to); -int security_binder_transfer_file(const struct cred *from, - const struct cred *to, struct file *file); +int security_binder_set_context_mgr(struct task_struct *mgr); +int security_binder_transaction(struct task_struct *from, + struct task_struct *to); +int security_binder_transfer_binder(struct task_struct *from, + struct task_struct *to); +int security_binder_transfer_file(struct task_struct *from, + struct task_struct *to, struct file *file); int security_ptrace_access_check(struct task_struct *child, unsigned int mode); int security_ptrace_traceme(struct task_struct *parent); int security_capget(struct task_struct *target, @@ -380,25 +380,25 @@ static inline int security_init(void) return 0; } -static inline int security_binder_set_context_mgr(const struct cred *mgr) +static inline int security_binder_set_context_mgr(struct task_struct *mgr) { return 0; } -static inline int security_binder_transaction(const struct cred *from, - const struct cred *to) +static inline int security_binder_transaction(struct task_struct *from, + struct task_struct *to) { return 0; } -static inline int security_binder_transfer_binder(const struct cred 
*from, - const struct cred *to) +static inline int security_binder_transfer_binder(struct task_struct *from, + struct task_struct *to) { return 0; } -static inline int security_binder_transfer_file(const struct cred *from, - const struct cred *to, +static inline int security_binder_transfer_file(struct task_struct *from, + struct task_struct *to, struct file *file) { return 0; diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h index d4c3c9bab582..fb7eb9ccb1cd 100644 --- a/include/linux/seq_buf.h +++ b/include/linux/seq_buf.h @@ -29,7 +29,7 @@ static inline void seq_buf_clear(struct seq_buf *s) } static inline void -seq_buf_init(struct seq_buf *s, char *buf, unsigned int size) +seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size) { s->buffer = buf; s->size = size; diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index e305b66a9fb9..f3d45dd42695 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -117,9 +117,9 @@ __printf(2, 3) void seq_printf(struct seq_file *m, const char *fmt, ...); void seq_putc(struct seq_file *m, char c); void seq_puts(struct seq_file *m, const char *s); -void seq_put_decimal_ull(struct seq_file *m, const char *delimiter, +void seq_put_decimal_ull(struct seq_file *m, char delimiter, unsigned long long num); -void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num); +void seq_put_decimal_ll(struct seq_file *m, char delimiter, long long num); void seq_escape(struct seq_file *m, const char *s, const char *esc); void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, diff --git a/include/linux/shm.h b/include/linux/shm.h index fbb74824f0df..6fb801686ad6 100644 --- a/include/linux/shm.h +++ b/include/linux/shm.h @@ -19,18 +19,9 @@ struct shmid_kernel /* private to the kernel */ pid_t shm_lprid; struct user_struct *mlock_user; - /* - * The task created the shm object, for - * task_lock(shp->shm_creator) - */ + /* The task created the shm 
object. NULL if the task is dead. */ struct task_struct *shm_creator; - - /* - * List by creator. task_lock(->shm_creator) required for read/write. - * If list_empty(), then the creator is dead already. - */ - struct list_head shm_clist; - struct ipc_namespace *ns; + struct list_head shm_clist; /* list by creator */ }; /* shm_mode upper byte flags */ diff --git a/include/linux/siphash.h b/include/linux/siphash.h index 0cda61855d90..bf21591a9e5e 100644 --- a/include/linux/siphash.h +++ b/include/linux/siphash.h @@ -27,7 +27,9 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key) } u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key); +#endif u64 siphash_1u64(const u64 a, const siphash_key_t *key); u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key); @@ -80,9 +82,10 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len, static inline u64 siphash(const void *data, size_t len, const siphash_key_t *key) { - if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || - !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) return __siphash_unaligned(data, len, key); +#endif return ___siphash_aligned(data, len, key); } @@ -93,8 +96,10 @@ typedef struct { u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key); +#endif u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key); u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key); @@ -130,9 +135,10 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len, static inline u32 hsiphash(const void *data, size_t len, const hsiphash_key_t *key) { 
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || - !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) return __hsiphash_unaligned(data, len, key); +#endif return ___hsiphash_aligned(data, len, key); } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5436e629259d..95feb153fe9a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1502,7 +1502,7 @@ static inline void __skb_insert(struct sk_buff *newsk, newsk->next = next; newsk->prev = prev; next->prev = prev->next = newsk; - WRITE_ONCE(list->qlen, list->qlen + 1); + list->qlen++; } static inline void __skb_queue_splice(const struct sk_buff_head *list, diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 2f4db51e3f3e..01cf8b6ac61a 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -430,9 +430,6 @@ struct spi_master { /* flag indicating this is an SPI slave controller */ bool slave; - /* flag indicating this is a non-devres managed controller */ - bool devm_allocated; - /* lock and mutex for SPI bus locking */ spinlock_t bus_lock_spinlock; struct mutex bus_lock_mutex; @@ -598,9 +595,6 @@ static inline struct spi_master *spi_alloc_slave(struct device *host, return __spi_alloc_controller(host, size, true); } -extern struct spi_master * -devm_spi_alloc_master(struct device *dev, unsigned int size); - extern int spi_register_master(struct spi_master *master); extern int devm_spi_register_master(struct device *dev, struct spi_master *master); diff --git a/include/linux/string.h b/include/linux/string.h index 1a9589a5ace6..870268d42ae7 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -28,10 +28,6 @@ size_t strlcpy(char *, const char *, size_t); #ifndef __HAVE_ARCH_STRSCPY ssize_t strscpy(char *, const char *, size_t); #endif - -/* Wraps calls to strscpy()/memset(), no arch specific code required */ -ssize_t 
strscpy_pad(char *dest, const char *src, size_t count); - #ifndef __HAVE_ARCH_STRCAT extern char * strcat(char *, const char *); #endif @@ -102,36 +98,6 @@ extern __kernel_size_t strcspn(const char *,const char *); #ifndef __HAVE_ARCH_MEMSET extern void * memset(void *,int,__kernel_size_t); #endif - -#ifndef __HAVE_ARCH_MEMSET16 -extern void *memset16(uint16_t *, uint16_t, __kernel_size_t); -#endif - -#ifndef __HAVE_ARCH_MEMSET32 -extern void *memset32(uint32_t *, uint32_t, __kernel_size_t); -#endif - -#ifndef __HAVE_ARCH_MEMSET64 -extern void *memset64(uint64_t *, uint64_t, __kernel_size_t); -#endif - -static inline void *memset_l(unsigned long *p, unsigned long v, - __kernel_size_t n) -{ - if (BITS_PER_LONG == 32) - return memset32((uint32_t *)p, v, n); - else - return memset64((uint64_t *)p, v, n); -} - -static inline void *memset_p(void **p, void *v, __kernel_size_t n) -{ - if (BITS_PER_LONG == 32) - return memset32((uint32_t *)p, (uintptr_t)v, n); - else - return memset64((uint64_t *)p, (uintptr_t)v, n); -} - #ifndef __HAVE_ARCH_MEMCPY extern void * memcpy(void *,const void *,__kernel_size_t); #endif diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 8def5e0a491f..70c6b92e15a7 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -23,7 +23,8 @@ #define XDR_QUADLEN(l) (((l) + 3) >> 2) /* - * Generic opaque `network object.' + * Generic opaque `network object.' At the kernel level, this type + * is used only by lockd. 
*/ #define XDR_MAX_NETOBJ 1024 struct xdr_netobj { diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 18fecaa860b7..2839d624d5ee 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -300,11 +300,6 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn) return kernfs_enable_ns(kn); } -__printf(2, 3) -int sysfs_emit(char *buf, const char *fmt, ...); -__printf(3, 4) -int sysfs_emit_at(char *buf, int at, const char *fmt, ...); - #else /* CONFIG_SYSFS */ static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) @@ -511,17 +506,6 @@ static inline void sysfs_enable_ns(struct kernfs_node *kn) { } -__printf(2, 3) -static inline int sysfs_emit(char *buf, const char *fmt, ...) -{ - return 0; -} - -__printf(3, 4) -static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...) -{ - return 0; -} #endif /* CONFIG_SYSFS */ static inline int __must_check sysfs_create_file(struct kobject *kobj, diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index f5be2716b01c..cfaf5a1d4bad 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -11,7 +11,7 @@ */ struct trace_seq { - char buffer[PAGE_SIZE]; + unsigned char buffer[PAGE_SIZE]; struct seq_buf seq; int full; }; @@ -50,7 +50,7 @@ static inline int trace_seq_used(struct trace_seq *s) * that is about to be written to and then return the result * of that write. */ -static inline char * +static inline unsigned char * trace_seq_buffer_ptr(struct trace_seq *s) { return s->buffer + seq_buf_used(&s->seq); diff --git a/include/linux/tty.h b/include/linux/tty.h index 5d4f5806da46..e5b15a83c8d7 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -280,10 +280,6 @@ struct tty_struct { struct termiox *termiox; /* May be NULL for unsupported */ char name[64]; struct pid *pgrp; /* Protected by ctrl lock */ - /* - * Writes protected by both ctrl lock and legacy mutex, readers must use - * at least one of them. 
- */ struct pid *session; unsigned long flags; int count; diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 6d8db1555260..161052477f77 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -235,7 +235,7 @@ * * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel * structure to complete. This method is optional and will only be called - * if provided (otherwise ENOTTY will be returned). + * if provided (otherwise EINVAL will be returned). */ #include diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index 7b38288dc239..df89c9bcba7d 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -68,13 +68,12 @@ struct u64_stats_sync { }; -#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) -#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq) -#else static inline void u64_stats_init(struct u64_stats_sync *syncp) { -} +#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) + seqcount_init(&syncp->seq); #endif +} static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) { diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index 6b6c1138f963..584f9a647ad4 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -11,8 +11,7 @@ #define VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE (1024 * 256) #define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4) #define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL -#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE virtio_transport_max_vsock_pkt_buf_size -extern uint virtio_transport_max_vsock_pkt_buf_size; +#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64) enum { VSOCK_VQ_RX = 0, /* for host to guest data */ diff --git a/include/linux/wait.h b/include/linux/wait.h index 419b5b2bf547..513b36f04dfd 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -151,7 +151,6 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); void __wake_up_sync_key(wait_queue_head_t *q, unsigned 
int mode, int nr, void *key); void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); -void __wake_up_pollfree(wait_queue_head_t *wq_head); void __wake_up_bit(wait_queue_head_t *, void *, int); int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned); int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned); @@ -186,31 +185,6 @@ wait_queue_head_t *bit_waitqueue(void *, int); #define wake_up_interruptible_sync_poll(x, m) \ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m)) -/** - * wake_up_pollfree - signal that a polled waitqueue is going away - * @wq_head: the wait queue head - * - * In the very rare cases where a ->poll() implementation uses a waitqueue whose - * lifetime is tied to a task rather than to the 'struct file' being polled, - * this function must be called before the waitqueue is freed so that - * non-blocking polls (e.g. epoll) are notified that the queue is going away. - * - * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via - * an explicit synchronize_rcu() or call_rcu(), or via SLAB_DESTROY_BY_RCU. - */ -static inline void wake_up_pollfree(wait_queue_head_t *wq_head) -{ - /* - * For performance reasons, we don't always take the queue lock here. - * Therefore, we might race with someone removing the last entry from - * the queue, and proceed while they still hold the queue lock. - * However, rcu_read_lock() is required to be held in such cases, so we - * can safely proceed with an RCU-delayed free. - */ - if (waitqueue_active(wq_head)) - __wake_up_pollfree(wq_head); -} - #define ___wait_cond_timeout(condition) \ ({ \ bool __cond = (condition); \ diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 2e97b7707dff..0c567847e28f 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -7,6 +7,11 @@ * storage pool implementations. 
Typically, this is used to * store compressed memory. */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2015 Sony Mobile Communications Inc, + * and licensed under the license of the file. + */ #ifndef _ZPOOL_H_ #define _ZPOOL_H_ @@ -60,6 +65,9 @@ void zpool_unmap_handle(struct zpool *pool, unsigned long handle); u64 zpool_get_total_size(struct zpool *pool); +unsigned long zpool_compact(struct zpool *pool); + +unsigned long zpool_get_num_compacted(struct zpool *zpool); /** * struct zpool_driver - driver implementation for zpool @@ -101,6 +109,10 @@ struct zpool_driver { void (*unmap)(void *pool, unsigned long handle); u64 (*total_size)(void *pool); + + unsigned long (*compact)(void *pool); + + unsigned long (*get_num_compacted)(void *pool); }; void zpool_register_driver(struct zpool_driver *driver); diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 79f2e1ccfcfb..fd60eccb59a6 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -8,7 +8,6 @@ void unix_inflight(struct user_struct *user, struct file *fp); void unix_notinflight(struct user_struct *user, struct file *fp); -void unix_destruct_scm(struct sk_buff *skb); void unix_gc(void); void wait_for_unix_gc(void); struct sock *unix_get_socket(struct file *filp); diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index e1e181059324..5aaf6cdb121a 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -498,7 +498,6 @@ struct hci_chan { struct sk_buff_head data_q; unsigned int sent; __u8 state; - bool amp; }; struct hci_conn_params { @@ -1013,7 +1012,6 @@ struct hci_dev *hci_alloc_dev(void); void hci_free_dev(struct hci_dev *hdev); int hci_register_dev(struct hci_dev *hdev); void hci_unregister_dev(struct hci_dev *hdev); -void hci_cleanup_dev(struct hci_dev *hdev); int hci_suspend_dev(struct hci_dev *hdev); int hci_resume_dev(struct hci_dev *hdev); int 
hci_reset_dev(struct hci_dev *hdev); diff --git a/include/net/bonding.h b/include/net/bonding.h index 1d85c5179fa8..6fbfc21b27b1 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -181,11 +181,6 @@ struct slave { struct rtnl_link_stats64 slave_stats; }; -static inline struct slave *to_slave(struct kobject *kobj) -{ - return container_of(kobj, struct slave, kobj); -} - struct bond_up_slave { unsigned int count; struct rcu_head rcu; @@ -672,9 +667,6 @@ extern struct bond_parm_tbl ad_select_tbl[]; /* exported from bond_netlink.c */ extern struct rtnl_link_ops bond_link_ops; -/* exported from bond_sysfs_slave.c */ -extern const struct sysfs_ops slave_sysfs_ops; - static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb) { atomic_long_inc(&dev->tx_dropped); diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h index 0baf2e21a533..028b754ae9b1 100644 --- a/include/net/caif/caif_dev.h +++ b/include/net/caif/caif_dev.h @@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer); * The link_support layer is used to add any Link Layer specific * framing. */ -int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, +void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, struct cflayer *link_support, int head_room, struct cflayer **layer, int (**rcv_func)( struct sk_buff *, struct net_device *, diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h index 219094ace893..70bfd017581f 100644 --- a/include/net/caif/cfcnfg.h +++ b/include/net/caif/cfcnfg.h @@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg); * @fcs: Specify if checksum is used in CAIF Framing Layer. * @head_room: Head space needed by link specific protocol. 
*/ -int +void cfcnfg_add_phy_layer(struct cfcnfg *cnfg, struct net_device *dev, struct cflayer *phy_layer, enum cfcnfg_phy_preference pref, diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h index bc3fae07a25f..b5b020f3c72e 100644 --- a/include/net/caif/cfserl.h +++ b/include/net/caif/cfserl.h @@ -9,5 +9,4 @@ #include struct cflayer *cfserl_create(int instance, bool use_stx); -void cfserl_release(struct cflayer *layer); #endif diff --git a/include/net/checksum.h b/include/net/checksum.h index b354fb7cd052..9fcaedf994ee 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -138,11 +138,6 @@ static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) *sum = ~csum16_add(csum16_sub(~(*sum), old), new); } -static inline void csum_replace(__wsum *csum, __wsum old, __wsum new) -{ - *csum = csum_add(csum_sub(*csum, old), new); -} - struct sk_buff; void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, __be32 from, __be32 to, bool pseudohdr); diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index 4303a28aa2dd..30a56ab2ccfb 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -31,9 +31,7 @@ static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb) return &md_dst->u.tun_info; dst = skb_dst(skb); - if (dst && dst->lwtstate && - (dst->lwtstate->type == LWTUNNEL_ENCAP_IP || - dst->lwtstate->type == LWTUNNEL_ENCAP_IP6)) + if (dst && dst->lwtstate) return lwt_tun_info(dst->lwtstate); return NULL; @@ -97,6 +95,7 @@ static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb) memcpy(&new_md->u.tun_info, &md_dst->u.tun_info, sizeof(struct ip_tunnel_info) + md_size); skb_dst_drop(skb); + dst_hold(&new_md->dst); skb_dst_set(skb, &new_md->dst); return new_md; } diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 245d999c0eac..dce2d586d9ce 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -3,7 +3,6 @@ #include #include -#include 
#include #include diff --git a/include/net/ip.h b/include/net/ip.h index 9919332daf2c..c0429f813013 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -356,18 +356,19 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb, { struct iphdr *iph = ip_hdr(skb); - /* We had many attacks based on IPID, use the private - * generator as much as we can. - */ - if (sk && inet_sk(sk)->inet_daddr) { - iph->id = htons(inet_sk(sk)->inet_id); - inet_sk(sk)->inet_id += segs; - return; - } if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) { - iph->id = 0; + /* This is only to work around buggy Windows95/2000 + * VJ compression implementations. If the ID field + * does not change, they drop every other packet in + * a TCP stream using header compression. + */ + if (sk && inet_sk(sk)->inet_daddr) { + iph->id = htons(inet_sk(sk)->inet_id); + inet_sk(sk)->inet_id += segs; + } else { + iph->id = 0; + } } else { - /* Unfortunately we need the big hammer to get a suitable IPID */ __ip_select_ident(net, iph, segs); } } diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index a53853dbd226..33f28e62b790 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -182,7 +182,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst, int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); -static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb) +static inline int ip6_skb_dst_mtu(struct sk_buff *skb) { struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? 
inet6_sk(skb->sk) : NULL; diff --git a/include/net/llc.h b/include/net/llc.h index 18dfd3e49a69..95e5ced4c133 100644 --- a/include/net/llc.h +++ b/include/net/llc.h @@ -72,9 +72,7 @@ struct llc_sap { static inline struct hlist_head *llc_sk_dev_hash(struct llc_sap *sap, int ifindex) { - u32 bucket = hash_32(ifindex, LLC_SK_DEV_HASH_BITS); - - return &sap->sk_dev_hash[bucket]; + return &sap->sk_dev_hash[ifindex % LLC_SK_DEV_HASH_ENTRIES]; } static inline diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index 49aa79c7b278..c0f0a13ed818 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -15,11 +15,9 @@ #include /* Lengths of frame formats */ -#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */ -#define LLC_PDU_LEN_S 4 -#define LLC_PDU_LEN_U 3 /* header and 1 control byte */ -/* header and 1 control byte and XID info */ -#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info)) +#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */ +#define LLC_PDU_LEN_S 4 +#define LLC_PDU_LEN_U 3 /* header and 1 control byte */ /* Known SAP addresses */ #define LLC_GLOBAL_SAP 0xFF #define LLC_NULL_SAP 0x00 /* not network-layer visible */ @@ -52,10 +50,9 @@ #define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */ #define LLC_PDU_TYPE_MASK 0x03 -#define LLC_PDU_TYPE_I 0 /* first bit */ -#define LLC_PDU_TYPE_S 1 /* first two bits */ -#define LLC_PDU_TYPE_U 3 /* first two bits */ -#define LLC_PDU_TYPE_U_XID 4 /* private type for detecting XID commands */ +#define LLC_PDU_TYPE_I 0 /* first bit */ +#define LLC_PDU_TYPE_S 1 /* first two bits */ +#define LLC_PDU_TYPE_U 3 /* first two bits */ #define LLC_PDU_TYPE_IS_I(pdu) \ ((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 
1 : 0) @@ -233,18 +230,9 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type, u8 ssap, u8 dsap, u8 cr) { - int hlen = 4; /* default value for I and S types */ + const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4; struct llc_pdu_un *pdu; - switch (type) { - case LLC_PDU_TYPE_U: - hlen = 3; - break; - case LLC_PDU_TYPE_U_XID: - hlen = 6; - break; - } - skb_push(skb, hlen); skb_reset_network_header(skb); pdu = llc_pdu_un_hdr(skb); @@ -386,10 +374,7 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb, xid_info->fmt_id = LLC_XID_FMT_ID; /* 0x81 */ xid_info->type = svcs_supported; xid_info->rw = rx_window << 1; /* size of receive window */ - - /* no need to push/put since llc_pdu_header_init() has already - * pushed 3 + 3 bytes - */ + skb_put(skb, sizeof(struct llc_xid_info)); } /** diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h index 630f0f5c3fa3..12f4cc841b6e 100644 --- a/include/net/netfilter/nf_nat_l4proto.h +++ b/include/net/netfilter/nf_nat_l4proto.h @@ -64,7 +64,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, - const struct nf_conn *ct); + const struct nf_conn *ct, u16 *rover); int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[], struct nf_nat_range *range); diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index fd0546b7566d..0dbce55437f2 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h @@ -32,7 +32,7 @@ void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *q void nf_unregister_queue_handler(struct net *net); void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); -bool nf_queue_entry_get_refs(struct nf_queue_entry *entry); +void nf_queue_entry_get_refs(struct nf_queue_entry *entry); 
void nf_queue_entry_release_refs(struct nf_queue_entry *entry); static inline void init_hashrandom(u32 *jhash_initval) diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 53bd3c952ed4..57ce24fb0047 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -42,7 +42,6 @@ enum nci_flag { NCI_UP, NCI_DATA_EXCHANGE, NCI_DATA_EXCHANGE_TO, - NCI_UNREG, }; /* NCI device states */ @@ -301,7 +300,6 @@ int nci_core_conn_create(struct nci_dev *ndev, u8 destination_type, int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id); struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev); -void nci_hci_deallocate(struct nci_dev *ndev); int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event, const u8 *param, size_t param_len); int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, diff --git a/include/net/nl802154.h b/include/net/nl802154.h index 53f140fc1983..32cb3e591e07 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -19,8 +19,6 @@ * */ -#include - #define NL802154_GENL_NAME "nl802154" enum nl802154_commands { @@ -145,9 +143,10 @@ enum nl802154_attrs { }; enum nl802154_iftype { - NL802154_IFTYPE_UNSPEC = (~(__u32)0), + /* for backwards compatibility TODO */ + NL802154_IFTYPE_UNSPEC = -1, - NL802154_IFTYPE_NODE = 0, + NL802154_IFTYPE_NODE, NL802154_IFTYPE_MONITOR, NL802154_IFTYPE_COORD, diff --git a/include/net/red.h b/include/net/red.h index 117a3654d319..3618cdfec884 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -167,24 +167,14 @@ static inline void red_set_vars(struct red_vars *v) v->qcount = -1; } -static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, - u8 Scell_log, u8 *stab) +static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog) { - if (fls(qth_min) + Wlog >= 32) + if (fls(qth_min) + Wlog > 32) return false; - if (fls(qth_max) + Wlog >= 32) - return false; - if (Scell_log >= 32) + if (fls(qth_max) + Wlog > 32) return false; if (qth_max < qth_min) return false; 
- if (stab) { - int i; - - for (i = 0; i < RED_STAB_SIZE; i++) - if (stab[i] >= 32) - return false; - } return true; } diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index baa977247dc9..2f87c1ba13de 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -28,7 +28,6 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh) * * @list: Used internally * @kind: Identifier - * @netns_refund: Physical device, move to init_net on netns exit * @maxtype: Highest device specific netlink attribute number * @policy: Netlink policy for device specific attribute validation * @validate: Optional validation function for netlink/changelink parameters @@ -82,7 +81,6 @@ struct rtnl_link_ops { unsigned int (*get_num_tx_queues)(void); unsigned int (*get_num_rx_queues)(void); - bool netns_refund; int slave_maxtype; const struct nla_policy *slave_policy; int (*slave_validate)(struct nlattr *tb[], diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 2eee8ea05a7f..d236ce450da3 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -797,7 +797,6 @@ struct psched_ratecfg { u64 rate_bytes_ps; /* bytes per second */ u32 mult; u16 overhead; - u16 mpu; u8 linklayer; u8 shift; }; @@ -807,9 +806,6 @@ static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, { len += r->overhead; - if (len < r->mpu) - len = r->mpu; - if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift; @@ -832,7 +828,6 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res, res->rate = min_t(u64, r->rate_bytes_ps, ~0U); res->overhead = r->overhead; - res->mpu = r->mpu; res->linklayer = (r->linklayer & TC_LINKLAYER_MASK); } diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 15cfec311500..bf03bab93d9e 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -344,7 +344,8 @@ typedef enum { } sctp_scope_policy_t; /* Based on IPv4 
scoping , - * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24. + * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24, + * 192.88.99.0/24. * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP * addresses. */ @@ -352,6 +353,7 @@ typedef enum { ((htonl(INADDR_BROADCAST) == a) || \ ipv4_is_multicast(a) || \ ipv4_is_zeronet(a) || \ + ipv4_is_test_198(a) || \ ipv4_is_anycast_6to4(a)) /* Flags used for the bind address copy functions. */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 1d24da658f43..eea9bdeecba2 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -469,7 +469,7 @@ struct sctp_af { int saddr); void (*from_sk) (union sctp_addr *, struct sock *sk); - bool (*from_addr_param) (union sctp_addr *, + void (*from_addr_param) (union sctp_addr *, union sctp_addr_param *, __be16 port, int iif); int (*to_addr_param) (const union sctp_addr *, diff --git a/include/net/sock.h b/include/net/sock.h index 693711fead9c..c3abc39a2fd6 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -429,10 +429,8 @@ struct sock { #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) __u32 sk_cgrp_prioidx; #endif - spinlock_t sk_peer_lock; struct pid *sk_peer_pid; const struct cred *sk_peer_cred; - long sk_rcvtimeo; long sk_sndtimeo; struct timer_list sk_timer; @@ -1740,8 +1738,7 @@ static inline u32 net_tx_rndhash(void) static inline void sk_set_txhash(struct sock *sk) { - /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */ - WRITE_ONCE(sk->sk_txhash, net_tx_rndhash()); + sk->sk_txhash = net_tx_rndhash(); } static inline void sk_rethink_txhash(struct sock *sk) @@ -1993,12 +1990,9 @@ static inline void sock_poll_wait(struct file *filp, static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) { - /* This pairs with WRITE_ONCE() in sk_set_txhash() */ - u32 txhash = READ_ONCE(sk->sk_txhash); - - if (txhash) { + if (sk->sk_txhash) { skb->l4_hash = 1; - skb->hash = 
txhash; + skb->hash = sk->sk_txhash; } } diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 004bf0ca8884..e59180264591 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -256,7 +256,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *, struct fc_frame *); /* libfcoe funcs */ -u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int); +u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int); int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *, const struct libfc_function_template *, int init_fcp); u32 fcoe_fc_crc(struct fc_frame *fp); diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index e673c7c9c5fb..6183d20a01fb 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -437,8 +437,6 @@ extern void iscsi_free_session(struct iscsi_cls_session *session); extern int iscsi_destroy_session(struct iscsi_cls_session *session); extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess, int dd_size, uint32_t cid); -extern void iscsi_put_conn(struct iscsi_cls_conn *conn); -extern void iscsi_get_conn(struct iscsi_cls_conn *conn); extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn); extern void iscsi_unblock_session(struct iscsi_cls_session *session); extern void iscsi_block_session(struct iscsi_cls_session *session); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 7b08d0c5b662..0eed9fd79ea5 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -181,10 +181,6 @@ enum tcm_sense_reason_table { TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16), TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17), TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18), - TCM_TOO_MANY_TARGET_DESCS = R(0x19), - TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE = R(0x1a), - TCM_TOO_MANY_SEGMENT_DESCS = R(0x1b), - TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = 
R(0x1c), #undef R }; @@ -787,9 +783,8 @@ struct se_device { atomic_long_t read_bytes; atomic_long_t write_bytes; /* Active commands on this virtual SE device */ - atomic_t non_ordered; - bool ordered_sync_in_progress; - atomic_t delayed_cmd_count; + atomic_t simple_cmds; + atomic_t dev_ordered_sync; atomic_t dev_qf_count; u32 export_count; spinlock_t delayed_cmd_lock; @@ -812,7 +807,6 @@ struct se_device { struct list_head dev_tmr_list; struct workqueue_struct *tmr_wq; struct work_struct qf_work_queue; - struct work_struct delayed_cmd_work; struct list_head delayed_cmd_list; struct list_head state_list; struct list_head qf_cmd_list; diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index ba8c415771b7..e9e7abf7df44 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -1,3 +1,8 @@ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2017 Sony Mobile Communications Inc, + * and licensed under the license of the file. 
+ */ #undef TRACE_SYSTEM #define TRACE_SYSTEM kmem @@ -214,6 +219,34 @@ TRACE_EVENT(mm_page_free_batched, __entry->cold) ); +TRACE_EVENT(mm_page_alloc_highorder, + + TP_PROTO(struct page *page, unsigned int order, + gfp_t gfp_flags, int migratetype), + + TP_ARGS(page, order, gfp_flags, migratetype), + + TP_STRUCT__entry(__field(struct page *, page) + __field(unsigned int, order) + __field(gfp_t, gfp_flags) + __field(int, migratetype) + ), + + TP_fast_assign( + __entry->page = page; + __entry->order = order; + __entry->gfp_flags = gfp_flags; + __entry->migratetype = migratetype; + ), + + TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s", + __entry->page, + page_to_pfn(__entry->page), + __entry->order, + __entry->migratetype, + show_gfp_flags(__entry->gfp_flags)) +); + TRACE_EVENT(mm_page_alloc, TP_PROTO(struct page *page, unsigned int order, @@ -310,6 +343,28 @@ TRACE_EVENT_CONDITION(mm_page_pcpu_drain, __entry->order, __entry->migratetype) ); +TRACE_EVENT(mm_page_alloc_fail, + + TP_PROTO(int alloc_order, gfp_t gfp_mask), + + TP_ARGS(alloc_order, gfp_mask), + + TP_STRUCT__entry( + __field(int, alloc_order) + __field(gfp_t, gfp_mask) + ), + + TP_fast_assign( + __entry->alloc_order = alloc_order; + __entry->gfp_mask = gfp_mask; + ), + + TP_printk("alloc_order=%d pageblock_order=%d gfp_mask=%s", + __entry->alloc_order, + pageblock_order, + show_gfp_flags(__entry->gfp_mask)) +); + TRACE_EVENT(mm_page_alloc_extfrag, TP_PROTO(struct page *page, diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 1c6c40b92374..73cd7e502d4c 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -8,6 +8,8 @@ #include #include +struct rq; + /* * Tracepoint for calling kthread_stop, performed to end a kthread: */ @@ -50,6 +52,653 @@ TRACE_EVENT(sched_kthread_stop_ret, TP_printk("ret=%d", __entry->ret) ); +/* + * Tracepoint for task enqueue/dequeue: + */ +TRACE_EVENT(sched_enq_deq_task, + + TP_PROTO(struct task_struct *p, bool 
enqueue, unsigned int cpus_allowed), + + TP_ARGS(p, enqueue, cpus_allowed), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, prio ) + __field( int, cpu ) + __field( bool, enqueue ) + __field(unsigned int, nr_running ) + __field(unsigned long, cpu_load ) + __field(unsigned int, rt_nr_running ) + __field(unsigned int, cpus_allowed ) +#ifdef CONFIG_SCHED_HMP + __field(unsigned int, demand ) + __field(unsigned int, pred_demand ) +#endif + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->prio = p->prio; + __entry->cpu = task_cpu(p); + __entry->enqueue = enqueue; + __entry->nr_running = task_rq(p)->nr_running; + __entry->cpu_load = task_rq(p)->cpu_load[0]; + __entry->rt_nr_running = task_rq(p)->rt.rt_nr_running; + __entry->cpus_allowed = cpus_allowed; +#ifdef CONFIG_SCHED_HMP + __entry->demand = p->ravg.demand; + __entry->pred_demand = p->ravg.pred_demand; +#endif + ), + + TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x" +#ifdef CONFIG_SCHED_HMP + " demand=%u pred_demand=%u" +#endif + , __entry->cpu, + __entry->enqueue ? 
"enqueue" : "dequeue", + __entry->comm, __entry->pid, + __entry->prio, __entry->nr_running, + __entry->cpu_load, __entry->rt_nr_running, __entry->cpus_allowed +#ifdef CONFIG_SCHED_HMP + , __entry->demand, __entry->pred_demand +#endif + ) +); + +#ifdef CONFIG_SCHED_HMP + +struct group_cpu_time; +struct migration_sum_data; +extern const char *task_event_names[]; +extern const char *migrate_type_names[]; + +TRACE_EVENT(sched_task_load, + + TP_PROTO(struct task_struct *p, bool boost, int reason, + bool sync, bool need_idle, u32 flags, int best_cpu), + + TP_ARGS(p, boost, reason, sync, need_idle, flags, best_cpu), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field(unsigned int, demand ) + __field( bool, boost ) + __field( int, reason ) + __field( bool, sync ) + __field( bool, need_idle ) + __field( u32, flags ) + __field( int, best_cpu ) + __field( u64, latency ) + __field( int, grp_id ) + __field( u64, avg_burst ) + __field( u64, avg_sleep ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->demand = p->ravg.demand; + __entry->boost = boost; + __entry->reason = reason; + __entry->sync = sync; + __entry->need_idle = need_idle; + __entry->flags = flags; + __entry->best_cpu = best_cpu; + __entry->latency = p->state == TASK_WAKING ? + sched_ktime_clock() - + p->ravg.mark_start : 0; + __entry->grp_id = p->grp ? 
p->grp->id : 0; + __entry->avg_burst = p->ravg.avg_burst; + __entry->avg_sleep = p->ravg.avg_sleep_time; + ), + + TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x grp=%d best_cpu=%d latency=%llu avg_burst=%llu avg_sleep=%llu", + __entry->pid, __entry->comm, __entry->demand, + __entry->boost, __entry->reason, __entry->sync, + __entry->need_idle, __entry->flags, __entry->grp_id, + __entry->best_cpu, __entry->latency, __entry->avg_burst, + __entry->avg_sleep) +); + +TRACE_EVENT(sched_set_preferred_cluster, + + TP_PROTO(struct related_thread_group *grp, u64 total_demand), + + TP_ARGS(grp, total_demand), + + TP_STRUCT__entry( + __field( int, id ) + __field( u64, demand ) + __field( int, cluster_first_cpu ) + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field(unsigned int, task_demand ) + ), + + TP_fast_assign( + __entry->id = grp->id; + __entry->demand = total_demand; + __entry->cluster_first_cpu = grp->preferred_cluster ? + cluster_first_cpu(grp->preferred_cluster) + : -1; + ), + + TP_printk("group_id %d total_demand %llu preferred_cluster_first_cpu %d", + __entry->id, __entry->demand, + __entry->cluster_first_cpu) +); + +DECLARE_EVENT_CLASS(sched_cpu_load, + + TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp), + + TP_ARGS(rq, idle, irqload, power_cost, temp), + + TP_STRUCT__entry( + __field(unsigned int, cpu ) + __field(unsigned int, idle ) + __field(unsigned int, nr_running ) + __field(unsigned int, nr_big_tasks ) + __field(unsigned int, load_scale_factor ) + __field(unsigned int, capacity ) + __field( u64, cumulative_runnable_avg ) + __field( u64, irqload ) + __field(unsigned int, max_freq ) + __field(unsigned int, power_cost ) + __field( int, cstate ) + __field( int, dstate ) + __field( int, temp ) + ), + + TP_fast_assign( + __entry->cpu = rq->cpu; + __entry->idle = idle; + __entry->nr_running = rq->nr_running; + __entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks; + 
__entry->load_scale_factor = cpu_load_scale_factor(rq->cpu); + __entry->capacity = cpu_capacity(rq->cpu); + __entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg; + __entry->irqload = irqload; + __entry->max_freq = cpu_max_freq(rq->cpu); + __entry->power_cost = power_cost; + __entry->cstate = rq->cstate; + __entry->dstate = rq->cluster->dstate; + __entry->temp = temp; + ), + + TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d temp %d", + __entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks, + __entry->load_scale_factor, __entry->capacity, + __entry->cumulative_runnable_avg, __entry->irqload, + __entry->max_freq, __entry->power_cost, __entry->cstate, + __entry->dstate, __entry->temp) +); + +DEFINE_EVENT(sched_cpu_load, sched_cpu_load_wakeup, + TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp), + TP_ARGS(rq, idle, irqload, power_cost, temp) +); + +DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb, + TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp), + TP_ARGS(rq, idle, irqload, power_cost, temp) +); + +DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup, + TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp), + TP_ARGS(rq, idle, irqload, power_cost, temp) +); + +TRACE_EVENT(sched_set_boost, + + TP_PROTO(int type), + + TP_ARGS(type), + + TP_STRUCT__entry( + __field(int, type ) + ), + + TP_fast_assign( + __entry->type = type; + ), + + TP_printk("type %d", __entry->type) +); + +#if defined(CREATE_TRACE_POINTS) && defined(CONFIG_SCHED_HMP) +static inline void __window_data(u32 *dst, u32 *src) +{ + if (src) + memcpy(dst, src, nr_cpu_ids * sizeof(u32)); + else + memset(dst, 0, nr_cpu_ids * sizeof(u32)); +} + +struct trace_seq; +const char *__window_print(struct trace_seq *p, const u32 *buf, int buf_len) +{ + int i; + const char *ret = p->buffer + 
seq_buf_used(&p->seq); + + for (i = 0; i < buf_len; i++) + trace_seq_printf(p, "%u ", buf[i]); + + trace_seq_putc(p, 0); + + return ret; +} + +static inline s64 __rq_update_sum(struct rq *rq, bool curr, bool new) +{ + if (curr) + if (new) + return rq->nt_curr_runnable_sum; + else + return rq->curr_runnable_sum; + else + if (new) + return rq->nt_prev_runnable_sum; + else + return rq->prev_runnable_sum; +} + +static inline s64 __grp_update_sum(struct rq *rq, bool curr, bool new) +{ + if (curr) + if (new) + return rq->grp_time.nt_curr_runnable_sum; + else + return rq->grp_time.curr_runnable_sum; + else + if (new) + return rq->grp_time.nt_prev_runnable_sum; + else + return rq->grp_time.prev_runnable_sum; +} + +static inline s64 +__get_update_sum(struct rq *rq, enum migrate_types migrate_type, + bool src, bool new, bool curr) +{ + switch (migrate_type) { + case RQ_TO_GROUP: + if (src) + return __rq_update_sum(rq, curr, new); + else + return __grp_update_sum(rq, curr, new); + case GROUP_TO_RQ: + if (src) + return __grp_update_sum(rq, curr, new); + else + return __rq_update_sum(rq, curr, new); + default: + WARN_ON_ONCE(1); + return -1; + } +} +#endif + +TRACE_EVENT(sched_update_task_ravg, + + TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt, + u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time, + struct group_cpu_time *cpu_time), + + TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( pid_t, cur_pid ) + __field(unsigned int, cur_freq ) + __field( u64, wallclock ) + __field( u64, mark_start ) + __field( u64, delta_m ) + __field( u64, win_start ) + __field( u64, delta ) + __field( u64, irqtime ) + __field(enum task_event, evt ) + __field(unsigned int, demand ) + __field(unsigned int, sum ) + __field( int, cpu ) + __field(unsigned int, pred_demand ) + __field( u64, rq_cs ) + __field( u64, rq_ps ) + __field( u64, grp_cs ) + __field( u64, 
grp_ps ) + __field( u64, grp_nt_cs ) + __field( u64, grp_nt_ps ) + __field( u32, curr_window ) + __field( u32, prev_window ) + __dynamic_array(u32, curr_sum, nr_cpu_ids ) + __dynamic_array(u32, prev_sum, nr_cpu_ids ) + __field( u64, nt_cs ) + __field( u64, nt_ps ) + __field( u32, active_windows ) + __field( u8, curr_top ) + __field( u8, prev_top ) + ), + + TP_fast_assign( + __entry->wallclock = wallclock; + __entry->win_start = rq->window_start; + __entry->delta = (wallclock - rq->window_start); + __entry->evt = evt; + __entry->cpu = rq->cpu; + __entry->cur_pid = rq->curr->pid; + __entry->cur_freq = cpu_cycles_to_freq(cycles, exec_time); + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->mark_start = p->ravg.mark_start; + __entry->delta_m = (wallclock - p->ravg.mark_start); + __entry->demand = p->ravg.demand; + __entry->sum = p->ravg.sum; + __entry->irqtime = irqtime; + __entry->pred_demand = p->ravg.pred_demand; + __entry->rq_cs = rq->curr_runnable_sum; + __entry->rq_ps = rq->prev_runnable_sum; + __entry->grp_cs = cpu_time ? cpu_time->curr_runnable_sum : 0; + __entry->grp_ps = cpu_time ? cpu_time->prev_runnable_sum : 0; + __entry->grp_nt_cs = cpu_time ? cpu_time->nt_curr_runnable_sum : 0; + __entry->grp_nt_ps = cpu_time ? 
cpu_time->nt_prev_runnable_sum : 0; + __entry->curr_window = p->ravg.curr_window; + __entry->prev_window = p->ravg.prev_window; + __window_data(__get_dynamic_array(curr_sum), p->ravg.curr_window_cpu); + __window_data(__get_dynamic_array(prev_sum), p->ravg.prev_window_cpu); + __entry->nt_cs = rq->nt_curr_runnable_sum; + __entry->nt_ps = rq->nt_prev_runnable_sum; + __entry->active_windows = p->ravg.active_windows; + __entry->curr_top = rq->curr_top; + __entry->prev_top = rq->prev_top; + ), + + TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu pred_demand %u rq_cs %llu rq_ps %llu cur_window %u (%s) prev_window %u (%s) nt_cs %llu nt_ps %llu active_wins %u grp_cs %lld grp_ps %lld, grp_nt_cs %llu, grp_nt_ps: %llu curr_top %u prev_top %u", + __entry->wallclock, __entry->win_start, __entry->delta, + task_event_names[__entry->evt], __entry->cpu, + __entry->cur_freq, __entry->cur_pid, + __entry->pid, __entry->comm, __entry->mark_start, + __entry->delta_m, __entry->demand, + __entry->sum, __entry->irqtime, __entry->pred_demand, + __entry->rq_cs, __entry->rq_ps, __entry->curr_window, + __window_print(p, __get_dynamic_array(curr_sum), nr_cpu_ids), + __entry->prev_window, + __window_print(p, __get_dynamic_array(prev_sum), nr_cpu_ids), + __entry->nt_cs, __entry->nt_ps, + __entry->active_windows, __entry->grp_cs, + __entry->grp_ps, __entry->grp_nt_cs, __entry->grp_nt_ps, + __entry->curr_top, __entry->prev_top) +); + +TRACE_EVENT(sched_get_task_cpu_cycles, + + TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time, struct task_struct *p), + + TP_ARGS(cpu, event, cycles, exec_time, p), + + TP_STRUCT__entry( + __field(int, cpu ) + __field(int, event ) + __field(u64, cycles ) + __field(u64, exec_time ) + __field(u32, freq ) + __field(u32, legacy_freq ) + __field(u32, max_freq) + __field(pid_t, pid ) + __array(char, comm, TASK_COMM_LEN ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + 
__entry->event = event; + __entry->cycles = cycles; + __entry->exec_time = exec_time; + __entry->freq = cpu_cycles_to_freq(cycles, exec_time); + __entry->legacy_freq = cpu_cur_freq(cpu); + __entry->max_freq = cpu_max_freq(cpu); + __entry->pid = p->pid; + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + ), + + TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u max_freq=%u task=%d (%s)", + __entry->cpu, __entry->event, __entry->cycles, + __entry->exec_time, __entry->freq, __entry->legacy_freq, + __entry->max_freq, __entry->pid, __entry->comm) +); + +TRACE_EVENT(sched_update_history, + + TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples, + enum task_event evt), + + TP_ARGS(rq, p, runtime, samples, evt), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field(unsigned int, runtime ) + __field( int, samples ) + __field(enum task_event, evt ) + __field(unsigned int, demand ) + __field(unsigned int, pred_demand ) + __array( u32, hist, RAVG_HIST_SIZE_MAX) + __field(unsigned int, nr_big_tasks ) + __field( int, cpu ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->runtime = runtime; + __entry->samples = samples; + __entry->evt = evt; + __entry->demand = p->ravg.demand; + __entry->pred_demand = p->ravg.pred_demand; + memcpy(__entry->hist, p->ravg.sum_history, + RAVG_HIST_SIZE_MAX * sizeof(u32)); + __entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks; + __entry->cpu = rq->cpu; + ), + + TP_printk("%d (%s): runtime %u samples %d event %s demand %u pred_demand %u" + " (hist: %u %u %u %u %u) cpu %d nr_big %u", + __entry->pid, __entry->comm, + __entry->runtime, __entry->samples, + task_event_names[__entry->evt], + __entry->demand, __entry->pred_demand, + __entry->hist[0], __entry->hist[1], + __entry->hist[2], __entry->hist[3], + __entry->hist[4], __entry->cpu, __entry->nr_big_tasks) +); + +TRACE_EVENT(sched_reset_all_window_stats, + + 
TP_PROTO(u64 window_start, u64 window_size, u64 time_taken, + int reason, unsigned int old_val, unsigned int new_val), + + TP_ARGS(window_start, window_size, time_taken, + reason, old_val, new_val), + + TP_STRUCT__entry( + __field( u64, window_start ) + __field( u64, window_size ) + __field( u64, time_taken ) + __field( int, reason ) + __field(unsigned int, old_val ) + __field(unsigned int, new_val ) + ), + + TP_fast_assign( + __entry->window_start = window_start; + __entry->window_size = window_size; + __entry->time_taken = time_taken; + __entry->reason = reason; + __entry->old_val = old_val; + __entry->new_val = new_val; + ), + + TP_printk("time_taken %llu window_start %llu window_size %llu reason %s old_val %u new_val %u", + __entry->time_taken, __entry->window_start, + __entry->window_size, + sched_window_reset_reasons[__entry->reason], + __entry->old_val, __entry->new_val) +); + +TRACE_EVENT(sched_update_pred_demand, + + TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int pct, + unsigned int pred_demand), + + TP_ARGS(rq, p, runtime, pct, pred_demand), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field(unsigned int, runtime ) + __field( int, pct ) + __field(unsigned int, pred_demand ) + __array( u8, bucket, NUM_BUSY_BUCKETS) + __field( int, cpu ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->runtime = runtime; + __entry->pct = pct; + __entry->pred_demand = pred_demand; + memcpy(__entry->bucket, p->ravg.busy_buckets, + NUM_BUSY_BUCKETS * sizeof(u8)); + __entry->cpu = rq->cpu; + ), + + TP_printk("%d (%s): runtime %u pct %d cpu %d pred_demand %u (buckets: %u %u %u %u %u %u %u %u %u %u)", + __entry->pid, __entry->comm, + __entry->runtime, __entry->pct, __entry->cpu, + __entry->pred_demand, __entry->bucket[0], __entry->bucket[1], + __entry->bucket[2], __entry->bucket[3],__entry->bucket[4], + __entry->bucket[5], __entry->bucket[6], 
__entry->bucket[7], + __entry->bucket[8], __entry->bucket[9]) +); + +TRACE_EVENT(sched_migration_update_sum, + + TP_PROTO(struct task_struct *p, enum migrate_types migrate_type, struct rq *rq), + + TP_ARGS(p, migrate_type, rq), + + TP_STRUCT__entry( + __field(int, tcpu ) + __field(int, pid ) + __field(enum migrate_types, migrate_type ) + __field( s64, src_cs ) + __field( s64, src_ps ) + __field( s64, dst_cs ) + __field( s64, dst_ps ) + __field( s64, src_nt_cs ) + __field( s64, src_nt_ps ) + __field( s64, dst_nt_cs ) + __field( s64, dst_nt_ps ) + ), + + TP_fast_assign( + __entry->tcpu = task_cpu(p); + __entry->pid = p->pid; + __entry->migrate_type = migrate_type; + __entry->src_cs = __get_update_sum(rq, migrate_type, + true, false, true); + __entry->src_ps = __get_update_sum(rq, migrate_type, + true, false, false); + __entry->dst_cs = __get_update_sum(rq, migrate_type, + false, false, true); + __entry->dst_ps = __get_update_sum(rq, migrate_type, + false, false, false); + __entry->src_nt_cs = __get_update_sum(rq, migrate_type, + true, true, true); + __entry->src_nt_ps = __get_update_sum(rq, migrate_type, + true, true, false); + __entry->dst_nt_cs = __get_update_sum(rq, migrate_type, + false, true, true); + __entry->dst_nt_ps = __get_update_sum(rq, migrate_type, + false, true, false); + ), + + TP_printk("pid %d task_cpu %d migrate_type %s src_cs %llu src_ps %llu dst_cs %lld dst_ps %lld src_nt_cs %llu src_nt_ps %llu dst_nt_cs %lld dst_nt_ps %lld", + __entry->pid, __entry->tcpu, migrate_type_names[__entry->migrate_type], + __entry->src_cs, __entry->src_ps, __entry->dst_cs, __entry->dst_ps, + __entry->src_nt_cs, __entry->src_nt_ps, __entry->dst_nt_cs, __entry->dst_nt_ps) +); + +TRACE_EVENT(sched_get_busy, + + TP_PROTO(int cpu, u64 load, u64 nload, u64 pload, int early, bool aggregated), + + TP_ARGS(cpu, load, nload, pload, early, aggregated), + + TP_STRUCT__entry( + __field( int, cpu ) + __field( u64, load ) + __field( u64, nload ) + __field( u64, pload ) + __field( int, 
early ) + __field( bool, aggregated ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->load = load; + __entry->nload = nload; + __entry->pload = pload; + __entry->early = early; + __entry->aggregated = aggregated; + ), + + TP_printk("cpu %d load %lld new_task_load %lld predicted_load %lld early %d aggregated %d", + __entry->cpu, __entry->load, __entry->nload, + __entry->pload, __entry->early, __entry->aggregated) +); + +TRACE_EVENT(sched_freq_alert, + + TP_PROTO(int cpu, int pd_notif, int check_groups, struct rq *rq, + u64 new_load), + + TP_ARGS(cpu, pd_notif, check_groups, rq, new_load), + + TP_STRUCT__entry( + __field( int, cpu ) + __field( int, pd_notif ) + __field( int, check_groups ) + __field( u64, old_busy_time ) + __field( u64, ps ) + __field( u64, new_load ) + __field( u64, old_pred ) + __field( u64, new_pred ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->pd_notif = pd_notif; + __entry->check_groups = check_groups; + __entry->old_busy_time = rq->old_busy_time; + __entry->ps = rq->prev_runnable_sum; + __entry->new_load = new_load; + __entry->old_pred = rq->old_estimated_time; + __entry->new_pred = rq->hmp_stats.pred_demands_sum; + ), + + TP_printk("cpu %d pd_notif=%d check_groups %d old_busy_time=%llu prev_sum=%lld new_load=%llu old_pred=%llu new_pred=%llu", + __entry->cpu, __entry->pd_notif, __entry->check_groups, + __entry->old_busy_time, __entry->ps, __entry->new_load, + __entry->old_pred, __entry->new_pred) +); + +#endif /* CONFIG_SCHED_HMP */ + /* * Tracepoint for waking up a task: */ @@ -166,14 +815,16 @@ TRACE_EVENT(sched_switch, */ TRACE_EVENT(sched_migrate_task, - TP_PROTO(struct task_struct *p, int dest_cpu), + TP_PROTO(struct task_struct *p, int dest_cpu, + unsigned int load), - TP_ARGS(p, dest_cpu), + TP_ARGS(p, dest_cpu, load), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) __field( int, prio ) + __field(unsigned int, load ) __field( int, orig_cpu ) __field( int, dest_cpu ) ), @@ -182,12 
+833,13 @@ TRACE_EVENT(sched_migrate_task, memcpy(__entry->comm, p->comm, TASK_COMM_LEN); __entry->pid = p->pid; __entry->prio = p->prio; + __entry->load = load; __entry->orig_cpu = task_cpu(p); __entry->dest_cpu = dest_cpu; ), - TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d", - __entry->comm, __entry->pid, __entry->prio, + TP_printk("comm=%s pid=%d prio=%d load=%d orig_cpu=%d dest_cpu=%d", + __entry->comm, __entry->pid, __entry->prio, __entry->load, __entry->orig_cpu, __entry->dest_cpu) ); @@ -662,6 +1314,130 @@ TRACE_EVENT(sched_wake_idle_without_ipi, TP_printk("cpu=%d", __entry->cpu) ); +TRACE_EVENT(sched_get_nr_running_avg, + + TP_PROTO(int avg, int big_avg, int iowait_avg, + unsigned int max_nr, unsigned int big_max_nr), + + TP_ARGS(avg, big_avg, iowait_avg, max_nr, big_max_nr), + + TP_STRUCT__entry( + __field( int, avg ) + __field( int, big_avg ) + __field( int, iowait_avg ) + __field( unsigned int, max_nr ) + __field( unsigned int, big_max_nr ) + ), + + TP_fast_assign( + __entry->avg = avg; + __entry->big_avg = big_avg; + __entry->iowait_avg = iowait_avg; + __entry->max_nr = max_nr; + __entry->big_max_nr = big_max_nr; + ), + + TP_printk("avg=%d big_avg=%d iowait_avg=%d max_nr=%u big_max_nr=%u", + __entry->avg, __entry->big_avg, __entry->iowait_avg, + __entry->max_nr, __entry->big_max_nr) +); + +TRACE_EVENT(core_ctl_eval_need, + + TP_PROTO(unsigned int cpu, unsigned int old_need, + unsigned int new_need, unsigned int updated), + TP_ARGS(cpu, old_need, new_need, updated), + TP_STRUCT__entry( + __field(u32, cpu) + __field(u32, old_need) + __field(u32, new_need) + __field(u32, updated) + ), + TP_fast_assign( + __entry->cpu = cpu; + __entry->old_need = old_need; + __entry->new_need = new_need; + __entry->updated = updated; + ), + TP_printk("cpu=%u, old_need=%u, new_need=%u, updated=%u", __entry->cpu, + __entry->old_need, __entry->new_need, __entry->updated) +); + +TRACE_EVENT(core_ctl_set_busy, + + TP_PROTO(unsigned int cpu, unsigned int busy, + 
unsigned int old_is_busy, unsigned int is_busy), + TP_ARGS(cpu, busy, old_is_busy, is_busy), + TP_STRUCT__entry( + __field(u32, cpu) + __field(u32, busy) + __field(u32, old_is_busy) + __field(u32, is_busy) + __field(bool, high_irqload) + ), + TP_fast_assign( + __entry->cpu = cpu; + __entry->busy = busy; + __entry->old_is_busy = old_is_busy; + __entry->is_busy = is_busy; + __entry->high_irqload = sched_cpu_high_irqload(cpu); + ), + TP_printk("cpu=%u, busy=%u, old_is_busy=%u, new_is_busy=%u high_irqload=%d", + __entry->cpu, __entry->busy, __entry->old_is_busy, + __entry->is_busy, __entry->high_irqload) +); + +TRACE_EVENT(core_ctl_set_boost, + + TP_PROTO(u32 refcount, s32 ret), + TP_ARGS(refcount, ret), + TP_STRUCT__entry( + __field(u32, refcount) + __field(s32, ret) + ), + TP_fast_assign( + __entry->refcount = refcount; + __entry->ret = ret; + ), + TP_printk("refcount=%u, ret=%d", __entry->refcount, __entry->ret) +); + +/** + * sched_isolate - called when cores are isolated/unisolated + * + * @acutal_mask: mask of cores actually isolated/unisolated + * @req_mask: mask of cores requested isolated/unisolated + * @online_mask: cpu online mask + * @time: amount of time in us it took to isolate/unisolate + * @isolate: 1 if isolating, 0 if unisolating + * + */ +TRACE_EVENT(sched_isolate, + + TP_PROTO(unsigned int requested_cpu, unsigned int isolated_cpus, + u64 start_time, unsigned char isolate), + + TP_ARGS(requested_cpu, isolated_cpus, start_time, isolate), + + TP_STRUCT__entry( + __field(u32, requested_cpu) + __field(u32, isolated_cpus) + __field(u32, time) + __field(unsigned char, isolate) + ), + + TP_fast_assign( + __entry->requested_cpu = requested_cpu; + __entry->isolated_cpus = isolated_cpus; + __entry->time = div64_u64(sched_clock() - start_time, 1000); + __entry->isolate = isolate; + ), + + TP_printk("iso cpu=%u cpus=0x%x time=%u us isolated=%d", + __entry->requested_cpu, __entry->isolated_cpus, + __entry->time, __entry->isolate) +); + 
TRACE_EVENT(sched_preempt_disable, TP_PROTO(u64 delta, bool irqs_disabled, @@ -1212,8 +1988,7 @@ TRACE_EVENT(walt_update_history, __entry->samples = samples; __entry->evt = evt; __entry->demand = p->ravg.demand; - __entry->walt_avg = (__entry->demand << 10); - do_div(__entry->walt_avg, walt_ravg_window); + __entry->walt_avg = (__entry->demand << 10) / walt_ravg_window, __entry->pelt_avg = p->se.avg.util_avg; memcpy(__entry->hist, p->ravg.sum_history, RAVG_HIST_SIZE_MAX * sizeof(u32)); diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index d01217407d6d..2609b1c3549e 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -65,9 +65,8 @@ TRACE_EVENT(writeback_dirty_page, ), TP_fast_assign( - strscpy_pad(__entry->name, - bdi_dev_name(mapping ? inode_to_bdi(mapping->host) : - NULL), 32); + strncpy(__entry->name, + mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32); __entry->ino = mapping ? mapping->host->i_ino : 0; __entry->index = page->index; ), @@ -96,7 +95,8 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template, struct backing_dev_info *bdi = inode_to_bdi(inode); /* may be called for files on pseudo FSes w/ unregistered bdi */ - strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); + strncpy(__entry->name, + bdi->dev ? 
dev_name(bdi->dev) : "(unknown)", 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->flags = flags; @@ -205,8 +205,8 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template, ), TP_fast_assign( - strscpy_pad(__entry->name, - bdi_dev_name(inode_to_bdi(inode)), 32); + strncpy(__entry->name, + dev_name(inode_to_bdi(inode)->dev), 32); __entry->ino = inode->i_ino; __entry->sync_mode = wbc->sync_mode; __trace_wbc_assign_cgroup(__get_str(cgroup), wbc); @@ -249,7 +249,8 @@ DECLARE_EVENT_CLASS(writeback_work_class, __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) ), TP_fast_assign( - strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); + strncpy(__entry->name, + wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32); __entry->nr_pages = work->nr_pages; __entry->sb_dev = work->sb ? work->sb->s_dev : 0; __entry->sync_mode = work->sync_mode; @@ -302,7 +303,7 @@ DECLARE_EVENT_CLASS(writeback_class, __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) ), TP_fast_assign( - strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); + strncpy(__entry->name, dev_name(wb->bdi->dev), 32); __trace_wb_assign_cgroup(__get_str(cgroup), wb); ), TP_printk("bdi %s: cgroup=%s", @@ -325,7 +326,7 @@ TRACE_EVENT(writeback_bdi_register, __array(char, name, 32) ), TP_fast_assign( - strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); + strncpy(__entry->name, dev_name(bdi->dev), 32); ), TP_printk("bdi %s", __entry->name @@ -350,7 +351,7 @@ DECLARE_EVENT_CLASS(wbc_class, ), TP_fast_assign( - strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); + strncpy(__entry->name, dev_name(bdi->dev), 32); __entry->nr_to_write = wbc->nr_to_write; __entry->pages_skipped = wbc->pages_skipped; __entry->sync_mode = wbc->sync_mode; @@ -401,7 +402,7 @@ TRACE_EVENT(writeback_queue_io, __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) ), TP_fast_assign( - strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); + strncpy(__entry->name, dev_name(wb->bdi->dev), 32); __entry->older = 
dirtied_before; __entry->age = (jiffies - dirtied_before) * 1000 / HZ; __entry->moved = moved; @@ -486,7 +487,7 @@ TRACE_EVENT(bdi_dirty_ratelimit, ), TP_fast_assign( - strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); + strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32); __entry->write_bw = KBps(wb->write_bandwidth); __entry->avg_write_bw = KBps(wb->avg_write_bandwidth); __entry->dirty_rate = KBps(dirty_rate); @@ -551,7 +552,7 @@ TRACE_EVENT(balance_dirty_pages, TP_fast_assign( unsigned long freerun = (thresh + bg_thresh) / 2; - strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); + strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32); __entry->limit = global_wb_domain.dirty_limit; __entry->setpoint = (global_wb_domain.dirty_limit + @@ -612,8 +613,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue, ), TP_fast_assign( - strscpy_pad(__entry->name, - bdi_dev_name(inode_to_bdi(inode)), 32); + strncpy(__entry->name, + dev_name(inode_to_bdi(inode)->dev), 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->dirtied_when = inode->dirtied_when; @@ -686,8 +687,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, ), TP_fast_assign( - strscpy_pad(__entry->name, - bdi_dev_name(inode_to_bdi(inode)), 32); + strncpy(__entry->name, + dev_name(inode_to_bdi(inode)->dev), 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->dirtied_when = inode->dirtied_when; diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h index 03c3e1869be7..c872bfd25e13 100644 --- a/include/uapi/linux/const.h +++ b/include/uapi/linux/const.h @@ -24,9 +24,4 @@ #define _BITUL(x) (_AC(1,UL) << (x)) #define _BITULL(x) (_AC(1,ULL) << (x)) -#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) -#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) - -#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) - #endif /* !(_LINUX_CONST_H) */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 
7eb9178e3666..5ad57375a99f 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -218,7 +218,6 @@ enum in6_addr_gen_mode { IN6_ADDR_GEN_MODE_EUI64, IN6_ADDR_GEN_MODE_NONE, IN6_ADDR_GEN_MODE_STABLE_PRIVACY, - IN6_ADDR_GEN_MODE_RANDOM, }; /* Bridge section */ diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 894dee25553b..e0172baf24aa 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -282,8 +282,7 @@ #define KEY_PAUSECD 201 #define KEY_PROG3 202 #define KEY_PROG4 203 -#define KEY_ALL_APPLICATIONS 204 /* AC Desktop Show All Applications */ -#define KEY_DASHBOARD KEY_ALL_APPLICATIONS +#define KEY_DASHBOARD 204 /* AL Dashboard */ #define KEY_SUSPEND 205 #define KEY_CLOSE 206 /* AC Close */ #define KEY_PLAY 207 diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h index 4acefd697677..928f98997d8a 100644 --- a/include/uapi/linux/lightnvm.h +++ b/include/uapi/linux/lightnvm.h @@ -20,7 +20,7 @@ #define _UAPI_LINUX_LIGHTNVM_H #ifdef __KERNEL__ -#include +#include #include #else /* __KERNEL__ */ #include diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h index 19067cecc0c8..5bd6dec4ff13 100644 --- a/include/uapi/linux/msdos_fs.h +++ b/include/uapi/linux/msdos_fs.h @@ -9,9 +9,7 @@ * The MS-DOS filesystem constants/structures */ -#ifndef SECTOR_SIZE #define SECTOR_SIZE 512 /* sector size (bytes) */ -#endif #define SECTOR_BITS 9 /* log2(SECTOR_SIZE) */ #define MSDOS_DPB (MSDOS_DPS) /* dir entries per block */ #define MSDOS_DPB_BITS 4 /* log2(MSDOS_DPB) */ diff --git a/include/uapi/linux/netfilter/nfnetlink_cthelper.h b/include/uapi/linux/netfilter/nfnetlink_cthelper.h index 30557bade935..33659f6fad3e 100644 --- a/include/uapi/linux/netfilter/nfnetlink_cthelper.h +++ b/include/uapi/linux/netfilter/nfnetlink_cthelper.h @@ -4,7 +4,7 @@ #define NFCT_HELPER_STATUS_DISABLED 0 #define NFCT_HELPER_STATUS_ENABLED 1 -enum 
nfnl_cthelper_msg_types { +enum nfnl_acct_msg_types { NFNL_MSG_CTHELPER_NEW, NFNL_MSG_CTHELPER_GET, NFNL_MSG_CTHELPER_DEL, diff --git a/include/uapi/linux/netfilter/x_tables.h b/include/uapi/linux/netfilter/x_tables.h index 8f40c2fe0ed4..c36969b91533 100644 --- a/include/uapi/linux/netfilter/x_tables.h +++ b/include/uapi/linux/netfilter/x_tables.h @@ -1,6 +1,6 @@ #ifndef _UAPI_X_TABLES_H #define _UAPI_X_TABLES_H -#include +#include #include #define XT_FUNCTION_MAXNAMELEN 30 diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index 8edc617e566a..a9d1772199bf 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -1,7 +1,7 @@ #ifndef _UAPI__LINUX_NETLINK_H #define _UAPI__LINUX_NETLINK_H -#include +#include #include /* for __kernel_sa_family_t */ #include diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index 1b6d54a328ba..399f39ff8048 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -261,7 +261,7 @@ enum nfc_sdp_attr { #define NFC_SE_ENABLED 0x1 struct sockaddr_nfc { - __kernel_sa_family_t sa_family; + sa_family_t sa_family; __u32 dev_idx; __u32 target_idx; __u32 nfc_protocol; @@ -269,14 +269,14 @@ struct sockaddr_nfc { #define NFC_LLCP_MAX_SERVICE_NAME 63 struct sockaddr_nfc_llcp { - __kernel_sa_family_t sa_family; + sa_family_t sa_family; __u32 dev_idx; __u32 target_idx; __u32 nfc_protocol; __u8 dsap; /* Destination SAP, if known */ __u8 ssap; /* Source SAP to be bound to */ char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */; - __kernel_size_t service_name_len; + size_t service_name_len; }; /* NFC socket protocols */ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 04bd75b0f1f2..eb3c786afa70 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -488,12 +488,6 @@ #define PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. 
*/ #define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */ #define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */ -#define PCI_EXP_DEVCTL_PAYLOAD_128B 0x0000 /* 128 Bytes */ -#define PCI_EXP_DEVCTL_PAYLOAD_256B 0x0020 /* 256 Bytes */ -#define PCI_EXP_DEVCTL_PAYLOAD_512B 0x0040 /* 512 Bytes */ -#define PCI_EXP_DEVCTL_PAYLOAD_1024B 0x0060 /* 1024 Bytes */ -#define PCI_EXP_DEVCTL_PAYLOAD_2048B 0x0080 /* 2048 Bytes */ -#define PCI_EXP_DEVCTL_PAYLOAD_4096B 0x00a0 /* 4096 Bytes */ #define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */ #define PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */ #define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */ diff --git a/include/uapi/linux/qbg-profile.h b/include/uapi/linux/qbg-profile.h deleted file mode 100644 index a30f4dbba9c6..000000000000 --- a/include/uapi/linux/qbg-profile.h +++ /dev/null @@ -1,55 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ -/* - * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
- */ - -#ifndef __QBG_PROFILE_H__ -#define __QBG_PROFILE_H__ - -#define MAX_BP_LUT_ROWS 35 -#define MAX_BP_LUT_COLS 8 -#define MAX_PROFILE_NAME_LENGTH 256 - -enum profile_table_type { - CHARGE_TABLE = 0, - DISCHARGE_TABLE, -}; - -struct battery_data_table { - unsigned short int table[MAX_BP_LUT_ROWS][MAX_BP_LUT_COLS]; - int unit_conv_factor[MAX_BP_LUT_COLS]; - unsigned short int nrows; - unsigned short int ncols; -}; - -struct battery_config { - char bp_profile_name[MAX_PROFILE_NAME_LENGTH]; - int bp_batt_id; - int capacity; - int bp_checksum; - int soh_range_high; - int soh_range_low; - int normal_impedance; - int aged_impedance; - int normal_capacity; - int aged_capacity; - int recharge_soc_delta; - int recharge_vflt_delta; - int recharge_iterm; -}; - -struct battery_profile_table { - enum profile_table_type table_type; - int table_index; - struct battery_data_table *table; -}; - -/* IOCTLs to query battery profile data */ -/* Battery configuration */ -#define BPIOCXBP \ - _IOWR('B', 0x01, struct battery_config) -/* Battery profile table */ -#define BPIOCXBPTABLE \ - _IOWR('B', 0x02, struct battery_profile_table) - -#endif diff --git a/include/uapi/linux/qbg.h b/include/uapi/linux/qbg.h deleted file mode 100644 index d8e335be2f54..000000000000 --- a/include/uapi/linux/qbg.h +++ /dev/null @@ -1,179 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ -/* - * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
- */ - -#ifndef __QBG_H__ -#define __QBG_H__ - -#define MAX_FIFO_COUNT 36 -#define QBG_MAX_STEP_CHG_ENTRIES 6 - -enum QBG_STATE { - QBG_LPM, - QBG_MPM, - QBG_HPM, - QBG_FAST_CHAR, - QBG_PON_OCV, - QBG_STATE_MAX, -}; - -enum QBG_SDAM_DATA_OFFSET { - QBG_ACC0_OFFSET = 0, - QBG_ACC1_OFFSET = 2, - QBG_ACC2_OFFSET = 4, - QBG_TBAT_OFFSET = 6, - QBG_IBAT_OFFSET = 8, - QBG_VREF_OFFSET = 10, - QBG_DATA_TAG_OFFSET = 12, - QBG_QG_STS_OFFSET, - QBG_STS1_OFFSET, - QBG_STS2_OFFSET, - QBG_STS3_OFFSET, - QBG_ONE_FIFO_LENGTH, -}; - -enum qbg { - QBG_PARAM_SOC, - QBG_PARAM_BATT_SOC, - QBG_PARAM_SYS_SOC, - QBG_PARAM_ESR, - QBG_PARAM_OCV_UV, - QBG_PARAM_MAX_LOAD_NOW, - QBG_PARAM_MAX_LOAD_AVG, - QBG_PARAM_HOLD_SOC_100PCT, - QBG_PARAM_CHARGE_CYCLE_COUNT, - QBG_PARAM_LEARNED_CAPACITY, - QBG_PARAM_TTF_100MS, - QBG_PARAM_TTE_100MS, - QBG_PARAM_SOH, - QBG_PARAM_TBAT, - QBG_PARAM_SYS_SOC_HOLD_100PCT, - QBG_PARAM_JEITA_COOL_THRESHOLD, - QBG_PARAM_TOTAL_IMPEDANCE, - QBG_PARAM_ESSENTIAL_PARAM_REVID, - QBG_PARAM_FIFO_TIMESTAMP, - QBG_PARAM_MAX, -}; - -struct qbg_essential_params { - short int msoc; - short int cutoff_soc; - short int full_soc; - short int x0; - short int x1; - short int x2; - short int soh_r; - short int soh_c; - short int theta0; - short int theta1; - short int theta2; - short int i1full; - short int i2full; - short int i1cutoff; - short int i2cutoff; - short int syssoc; - int discharge_cycle_count; - int charge_cycle_count; - unsigned int rtc_time; - short int batt_therm; - unsigned short int ocv; -} __attribute__ ((__packed__)); - -struct fifo_data { - unsigned short int v1; - unsigned short int v2; - unsigned short int i; - unsigned short int tbat; - unsigned short int ibat; - unsigned short int vref; - char data_tag; - char qg_sts; - char sts1; - char sts2; - char sts3; -} __attribute__ ((__packed__)); - -struct k_fifo_data { - unsigned int v1; - unsigned int v2; - unsigned int i; - unsigned int tbat; - unsigned int ibat; - unsigned int vref; - unsigned int data_tag; - 
unsigned int qg_sts; - unsigned int sts1; - unsigned int sts2; - unsigned int sts3; -} __attribute__ ((__packed__)); - -struct qbg_config { - unsigned int batt_id; - unsigned int pon_ocv; - unsigned int pon_ibat; - unsigned int pon_tbat; - unsigned int pon_soc; - unsigned int float_volt_uv; - unsigned int fastchg_curr_ma; - unsigned int vbat_cutoff_mv; - unsigned int ibat_cutoff_ma; - unsigned int vph_min_mv; - unsigned int iterm_ma; - unsigned int rconn_mohm; - unsigned long current_time; - unsigned int sdam_batt_id; - unsigned int essential_param_revid; - unsigned long sample_time_us[QBG_STATE_MAX]; -} __attribute__ ((__packed__)); - -struct qbg_param { - unsigned int data; - _Bool valid; -}; - -struct qbg_kernel_data { - unsigned int seq_no; - unsigned int fifo_time; - unsigned int fifo_count; - struct k_fifo_data fifo[MAX_FIFO_COUNT]; - struct qbg_param param[QBG_PARAM_MAX]; -} __attribute__ ((__packed__)); - -struct qbg_user_data { - struct qbg_param param[QBG_PARAM_MAX]; -} __attribute__ ((__packed__)); - -struct range_data { - int low_threshold; - int high_threshold; - unsigned int value; -} __attribute__ ((__packed__)); - -struct ranges { - struct range_data data[QBG_MAX_STEP_CHG_ENTRIES]; - unsigned char range_count; - _Bool valid; -} __attribute__((__packed__)); - -struct qbg_step_chg_jeita_params { - int jeita_full_fv_10nv; - int jeita_full_iterm_10na; - int jeita_warm_adc_value; - int jeita_cool_adc_value; - int battery_beta; - int battery_therm_kohm; - struct ranges step_fcc_cfg; - struct ranges jeita_fcc_cfg; - struct ranges jeita_fv_cfg; - unsigned char ttf_calc_mode; -} __attribute__ ((__packed__)); - -/* IOCTLs to read & write QBG config and essential params */ -#define QBGIOCXCFG _IOR('B', 0x01, struct qbg_config) -#define QBGIOCXEPR _IOR('B', 0x02, struct qbg_essential_params) -#define QBGIOCXEPW _IOWR('B', 0x03, struct qbg_essential_params) -#define QBGIOCXSTEPCHGCFG \ - _IOWR('B', 0x04, struct qbg_step_chg_jeita_params) - -#endif diff --git 
a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h index 5bcc637cee46..1e5ac4e776da 100644 --- a/include/uapi/linux/serial_reg.h +++ b/include/uapi/linux/serial_reg.h @@ -61,7 +61,6 @@ * ST16C654: 8 16 56 60 8 16 32 56 PORT_16654 * TI16C750: 1 16 32 56 xx xx xx xx PORT_16750 * TI16C752: 8 16 56 60 8 16 32 56 - * OX16C950: 16 32 112 120 16 32 64 112 PORT_16C950 * Tegra: 1 4 8 14 16 8 4 1 PORT_TEGRA */ #define UART_FCR_R_TRIG_00 0x00 diff --git a/include/uapi/linux/slatecom_interface.h b/include/uapi/linux/slatecom_interface.h deleted file mode 100644 index 1caed8c42bf6..000000000000 --- a/include/uapi/linux/slatecom_interface.h +++ /dev/null @@ -1,93 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ -/* - * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. - */ -#ifndef LINUX_SLATECOM_INTERFACE_H -#define LINUX_SLATECOM_INTERFACE_H - -#include - -#define SLATECOM_REG_READ 0 -#define SLATECOM_AHB_READ 1 -#define SLATECOM_AHB_WRITE 2 -#define SLATECOM_SET_SPI_FREE 3 -#define SLATECOM_SET_SPI_BUSY 4 -#define SLATECOM_REG_WRITE 5 -#define SLATECOM_SOFT_RESET 6 -#define SLATECOM_MODEM_DOWN2_SLATE 7 -#define SLATECOM_TWM_EXIT 8 -#define SLATECOM_SLATE_APP_RUNNING 9 -#define SLATECOM_ADSP_DOWN2_SLATE 10 -#define SLATECOM_SLATE_WEAR_LOAD 11 -#define SLATECOM_SLATE_WEAR_UNLOAD 12 -#define EXCHANGE_CODE 'V' - -struct slate_ui_data { - __u64 __user write; - __u64 __user result; - __u32 slate_address; - __u32 cmd; - __u32 num_of_words; - __u8 __user *buffer; -}; - -enum slate_event_type { - SLATE_BEFORE_POWER_DOWN = 1, - SLATE_AFTER_POWER_DOWN, - SLATE_BEFORE_POWER_UP, - SLATE_AFTER_POWER_UP, - MODEM_BEFORE_POWER_DOWN, - MODEM_AFTER_POWER_UP, - ADSP_BEFORE_POWER_DOWN, - ADSP_AFTER_POWER_UP, - TWM_SLATE_AFTER_POWER_UP, - SLATE_DSP_ERROR, - SLATE_DSP_READY, - SLATE_BT_ERROR, - SLATE_BT_READY, -}; - -#define SLATE_AFTER_POWER_UP SLATE_AFTER_POWER_UP - -#define REG_READ \ - _IOWR(EXCHANGE_CODE, SLATECOM_REG_READ, \ - 
struct slate_ui_data) -#define AHB_READ \ - _IOWR(EXCHANGE_CODE, SLATECOM_AHB_READ, \ - struct slate_ui_data) -#define AHB_WRITE \ - _IOW(EXCHANGE_CODE, SLATECOM_AHB_WRITE, \ - struct slate_ui_data) -#define SET_SPI_FREE \ - _IOR(EXCHANGE_CODE, SLATECOM_SET_SPI_FREE, \ - struct slate_ui_data) -#define SET_SPI_BUSY \ - _IOR(EXCHANGE_CODE, SLATECOM_SET_SPI_BUSY, \ - struct slate_ui_data) -#define REG_WRITE \ - _IOWR(EXCHANGE_CODE, SLATECOM_REG_WRITE, \ - struct slate_ui_data) -#define SLATE_SOFT_RESET \ - _IOWR(EXCHANGE_CODE, SLATECOM_SOFT_RESET, \ - struct slate_ui_data) -#define SLATE_TWM_EXIT \ - _IOWR(EXCHANGE_CODE, SLATECOM_TWM_EXIT, \ - struct slate_ui_data) -#define SLATE_APP_RUNNING \ - _IOWR(EXCHANGE_CODE, SLATECOM_SLATE_APP_RUNNING, \ - struct slate_ui_data) -#define SLATE_MODEM_DOWN2_SLATE_DONE \ - _IOWR(EXCHANGE_CODE, SLATECOM_MODEM_DOWN2_SLATE, \ - struct slate_ui_data) -#define SLATE_WEAR_LOAD \ - _IOWR(EXCHANGE_CODE, SLATECOM_SLATE_WEAR_LOAD, \ - struct slate_ui_data) -#define SLATE_WEAR_UNLOAD \ - _IOWR(EXCHANGE_CODE, SLATECOM_SLATE_WEAR_UNLOAD, \ - struct slate_ui_data) -#define SLATE_ADSP_DOWN2_SLATE_DONE \ - _IOWR(EXCHANGE_CODE, SLATECOM_ADSP_DOWN2_SLATE, \ - struct slate_ui_data) - -#endif /* LINUX_SLATECOM_INTERFACE_H */ - diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index a1ae3778570f..47e0de1df362 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h @@ -22,7 +22,7 @@ #ifndef _UAPI_LINUX_SYSCTL_H #define _UAPI_LINUX_SYSCTL_H -#include +#include #include #include diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index a055762bcea2..7778723e4405 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -333,9 +333,6 @@ struct usb_config_descriptor { /*-------------------------------------------------------------------------*/ -/* USB String descriptors can contain at most 126 characters. 
*/ -#define USB_MAX_STRING_LEN 126 - /* USB_DT_STRING: String descriptor */ struct usb_string_descriptor { __u8 bLength; diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index f9d8aac170fb..34b1379f9777 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -157,7 +157,6 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr, map->flags = flags; map->ref = ref; map->dom = domid; - map->status = 1; /* arbitrary positive value */ } static inline void diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h index 4076d1c407d8..7dc685b4057d 100644 --- a/include/xen/interface/io/ring.h +++ b/include/xen/interface/io/ring.h @@ -24,79 +24,82 @@ typedef unsigned int RING_IDX; * A ring contains as many entries as will fit, rounded down to the nearest * power of two (so we can mask with (size-1) to loop around). */ -#define __CONST_RING_SIZE(_s, _sz) \ - (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ - sizeof(((struct _s##_sring *)0)->ring[0]))) +#define __CONST_RING_SIZE(_s, _sz) \ + (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ + sizeof(((struct _s##_sring *)0)->ring[0]))) + /* * The same for passing in an actual pointer instead of a name tag. */ -#define __RING_SIZE(_s, _sz) \ - (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) +#define __RING_SIZE(_s, _sz) \ + (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) /* * Macros to make the correct C datatypes for a new kind of ring. * * To make a new ring datatype, you need to have two message structures, - * let's say request_t, and response_t already defined. + * let's say struct request, and struct response already defined. * * In a header where you want the ring datatype declared, you then do: * - * DEFINE_RING_TYPES(mytag, request_t, response_t); + * DEFINE_RING_TYPES(mytag, struct request, struct response); * * These expand out to give you a set of types, as you can see below. 
* The most important of these are: * - * mytag_sring_t - The shared ring. - * mytag_front_ring_t - The 'front' half of the ring. - * mytag_back_ring_t - The 'back' half of the ring. + * struct mytag_sring - The shared ring. + * struct mytag_front_ring - The 'front' half of the ring. + * struct mytag_back_ring - The 'back' half of the ring. * * To initialize a ring in your code you need to know the location and size * of the shared memory area (PAGE_SIZE, for instance). To initialise * the front half: * - * mytag_front_ring_t front_ring; - * SHARED_RING_INIT((mytag_sring_t *)shared_page); - * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); + * struct mytag_front_ring front_ring; + * SHARED_RING_INIT((struct mytag_sring *)shared_page); + * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page, + * PAGE_SIZE); * * Initializing the back follows similarly (note that only the front * initializes the shared ring): * - * mytag_back_ring_t back_ring; - * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); + * struct mytag_back_ring back_ring; + * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page, + * PAGE_SIZE); */ -#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ - \ -/* Shared ring entry */ \ -union __name##_sring_entry { \ - __req_t req; \ - __rsp_t rsp; \ -}; \ - \ -/* Shared ring page */ \ -struct __name##_sring { \ - RING_IDX req_prod, req_event; \ - RING_IDX rsp_prod, rsp_event; \ - uint8_t __pad[48]; \ - union __name##_sring_entry ring[1]; /* variable-length */ \ -}; \ - \ -/* "Front" end's private variables */ \ -struct __name##_front_ring { \ - RING_IDX req_prod_pvt; \ - RING_IDX rsp_cons; \ - unsigned int nr_ents; \ - struct __name##_sring *sring; \ -}; \ - \ -/* "Back" end's private variables */ \ -struct __name##_back_ring { \ - RING_IDX rsp_prod_pvt; \ - RING_IDX req_cons; \ - unsigned int nr_ents; \ - struct __name##_sring *sring; \ -}; \ - \ +#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) 
\ + \ +/* Shared ring entry */ \ +union __name##_sring_entry { \ + __req_t req; \ + __rsp_t rsp; \ +}; \ + \ +/* Shared ring page */ \ +struct __name##_sring { \ + RING_IDX req_prod, req_event; \ + RING_IDX rsp_prod, rsp_event; \ + uint8_t pad[48]; \ + union __name##_sring_entry ring[1]; /* variable-length */ \ +}; \ + \ +/* "Front" end's private variables */ \ +struct __name##_front_ring { \ + RING_IDX req_prod_pvt; \ + RING_IDX rsp_cons; \ + unsigned int nr_ents; \ + struct __name##_sring *sring; \ +}; \ + \ +/* "Back" end's private variables */ \ +struct __name##_back_ring { \ + RING_IDX rsp_prod_pvt; \ + RING_IDX req_cons; \ + unsigned int nr_ents; \ + struct __name##_sring *sring; \ +}; + /* * Macros for manipulating rings. * @@ -113,99 +116,105 @@ struct __name##_back_ring { \ */ /* Initialising empty rings */ -#define SHARED_RING_INIT(_s) do { \ - (_s)->req_prod = (_s)->rsp_prod = 0; \ - (_s)->req_event = (_s)->rsp_event = 1; \ - (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \ +#define SHARED_RING_INIT(_s) do { \ + (_s)->req_prod = (_s)->rsp_prod = 0; \ + (_s)->req_event = (_s)->rsp_event = 1; \ + memset((_s)->pad, 0, sizeof((_s)->pad)); \ } while(0) -#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \ - (_r)->req_prod_pvt = (_i); \ - (_r)->rsp_cons = (_i); \ - (_r)->nr_ents = __RING_SIZE(_s, __size); \ - (_r)->sring = (_s); \ +#define FRONT_RING_INIT(_r, _s, __size) do { \ + (_r)->req_prod_pvt = 0; \ + (_r)->rsp_cons = 0; \ + (_r)->nr_ents = __RING_SIZE(_s, __size); \ + (_r)->sring = (_s); \ } while (0) -#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size) +#define BACK_RING_INIT(_r, _s, __size) do { \ + (_r)->rsp_prod_pvt = 0; \ + (_r)->req_cons = 0; \ + (_r)->nr_ents = __RING_SIZE(_s, __size); \ + (_r)->sring = (_s); \ +} while (0) -#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \ - (_r)->rsp_prod_pvt = (_i); \ - (_r)->req_cons = (_i); \ - (_r)->nr_ents = __RING_SIZE(_s, __size); \ - (_r)->sring = (_s); \ +/* Initialize to 
existing shared indexes -- for recovery */ +#define FRONT_RING_ATTACH(_r, _s, __size) do { \ + (_r)->sring = (_s); \ + (_r)->req_prod_pvt = (_s)->req_prod; \ + (_r)->rsp_cons = (_s)->rsp_prod; \ + (_r)->nr_ents = __RING_SIZE(_s, __size); \ } while (0) -#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size) +#define BACK_RING_ATTACH(_r, _s, __size) do { \ + (_r)->sring = (_s); \ + (_r)->rsp_prod_pvt = (_s)->rsp_prod; \ + (_r)->req_cons = (_s)->req_prod; \ + (_r)->nr_ents = __RING_SIZE(_s, __size); \ +} while (0) /* How big is this ring? */ -#define RING_SIZE(_r) \ +#define RING_SIZE(_r) \ ((_r)->nr_ents) /* Number of free requests (for use on front side only). */ -#define RING_FREE_REQUESTS(_r) \ +#define RING_FREE_REQUESTS(_r) \ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) /* Test if there is an empty slot available on the front ring. * (This is only meaningful from the front. ) */ -#define RING_FULL(_r) \ +#define RING_FULL(_r) \ (RING_FREE_REQUESTS(_r) == 0) /* Test if there are outstanding messages to be processed on a ring. */ -#define RING_HAS_UNCONSUMED_RESPONSES(_r) \ +#define RING_HAS_UNCONSUMED_RESPONSES(_r) \ ((_r)->sring->rsp_prod - (_r)->rsp_cons) -#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ - unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ - unsigned int rsp = RING_SIZE(_r) - \ - ((_r)->req_cons - (_r)->rsp_prod_pvt); \ - req < rsp ? req : rsp; \ -}) +#define RING_HAS_UNCONSUMED_REQUESTS(_r) \ + ({ \ + unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ + unsigned int rsp = RING_SIZE(_r) - \ + ((_r)->req_cons - (_r)->rsp_prod_pvt); \ + req < rsp ? req : rsp; \ + }) /* Direct access to individual ring elements, by index. 
*/ -#define RING_GET_REQUEST(_r, _idx) \ +#define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) -#define RING_GET_RESPONSE(_r, _idx) \ - (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) - /* - * Get a local copy of a request/response. + * Get a local copy of a request. * - * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is + * Use this in preference to RING_GET_REQUEST() so all processing is * done on a local copy that cannot be modified by the other end. * * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this - * to be ineffective where dest is a struct which consists of only bitfields. + * to be ineffective where _req is a struct which consists of only bitfields. */ -#define RING_COPY_(type, r, idx, dest) do { \ - /* Use volatile to force the copy into dest. */ \ - *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \ +#define RING_COPY_REQUEST(_r, _idx, _req) do { \ + /* Use volatile to force the copy into _req. */ \ + *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \ } while (0) -#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req) -#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp) +#define RING_GET_RESPONSE(_r, _idx) \ + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) /* Loop termination condition: Would the specified index overflow the ring? */ -#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ +#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) /* Ill-behaved frontend determination: Can there be this many requests? */ -#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \ +#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \ (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r)) -/* Ill-behaved backend determination: Can there be this many responses? 
*/ -#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \ - (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r)) -#define RING_PUSH_REQUESTS(_r) do { \ - wmb(); /* back sees requests /before/ updated producer index */ \ - (_r)->sring->req_prod = (_r)->req_prod_pvt; \ +#define RING_PUSH_REQUESTS(_r) do { \ + wmb(); /* back sees requests /before/ updated producer index */ \ + (_r)->sring->req_prod = (_r)->req_prod_pvt; \ } while (0) -#define RING_PUSH_RESPONSES(_r) do { \ - wmb(); /* front sees resps /before/ updated producer index */ \ - (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ +#define RING_PUSH_RESPONSES(_r) do { \ + wmb(); /* front sees responses /before/ updated producer index */ \ + (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ } while (0) /* @@ -238,40 +247,40 @@ struct __name##_back_ring { \ * field appropriately. */ -#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ - RING_IDX __old = (_r)->sring->req_prod; \ - RING_IDX __new = (_r)->req_prod_pvt; \ - wmb(); /* back sees requests /before/ updated producer index */ \ - (_r)->sring->req_prod = __new; \ - mb(); /* back sees new requests /before/ we check req_event */ \ - (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ - (RING_IDX)(__new - __old)); \ +#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ + RING_IDX __old = (_r)->sring->req_prod; \ + RING_IDX __new = (_r)->req_prod_pvt; \ + wmb(); /* back sees requests /before/ updated producer index */ \ + (_r)->sring->req_prod = __new; \ + mb(); /* back sees new requests /before/ we check req_event */ \ + (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ + (RING_IDX)(__new - __old)); \ } while (0) -#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ - RING_IDX __old = (_r)->sring->rsp_prod; \ - RING_IDX __new = (_r)->rsp_prod_pvt; \ - wmb(); /* front sees resps /before/ updated producer index */ \ - (_r)->sring->rsp_prod = __new; \ - mb(); /* front sees new resps /before/ we check rsp_event */ \ - (_notify) = 
((RING_IDX)(__new - (_r)->sring->rsp_event) < \ - (RING_IDX)(__new - __old)); \ +#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ + RING_IDX __old = (_r)->sring->rsp_prod; \ + RING_IDX __new = (_r)->rsp_prod_pvt; \ + wmb(); /* front sees responses /before/ updated producer index */ \ + (_r)->sring->rsp_prod = __new; \ + mb(); /* front sees new responses /before/ we check rsp_event */ \ + (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ + (RING_IDX)(__new - __old)); \ } while (0) -#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ - if (_work_to_do) break; \ - (_r)->sring->req_event = (_r)->req_cons + 1; \ - mb(); \ - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ +#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ + if (_work_to_do) break; \ + (_r)->sring->req_event = (_r)->req_cons + 1; \ + mb(); \ + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ } while (0) -#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ - if (_work_to_do) break; \ - (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ - mb(); \ - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ +#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ + if (_work_to_do) break; \ + (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ + mb(); \ + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ } while (0) #endif /* __XEN_PUBLIC_IO_RING_H__ */ diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index ed9e7e3307b7..32b944b7cebd 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -58,15 +58,6 @@ struct xenbus_watch /* Path being watched. */ const char *node; - unsigned int nr_pending; - - /* - * Called just before enqueing new event while a spinlock is held. 
- * The event will be discarded if this callback returns false. - */ - bool (*will_handle)(struct xenbus_watch *, - const char **vec, unsigned int len); - /* Callback (executed in a process context with no locks held). */ void (*callback)(struct xenbus_watch *, const char **vec, unsigned int len); @@ -203,14 +194,10 @@ void xenbus_suspend_cancel(void); int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, - bool (*will_handle)(struct xenbus_watch *, - const char **, unsigned int), void (*callback)(struct xenbus_watch *, const char **, unsigned int)); -__printf(5, 6) +__printf(4, 5) int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, - bool (*will_handle)(struct xenbus_watch *, - const char **, unsigned int), void (*callback)(struct xenbus_watch *, const char **, unsigned int), const char *pathfmt, ...); diff --git a/init/Kconfig b/init/Kconfig index 888a1d6d36e9..b62a77075013 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -65,7 +65,7 @@ config CROSS_COMPILE config COMPILE_TEST bool "Compile also drivers which will not load" - depends on HAS_IOMEM + default n help Some drivers can be compiled on a different platform than they are intended to be run on. Despite they cannot be loaded there (or even @@ -1219,6 +1219,33 @@ config CGROUP_WRITEBACK endif # CGROUPS +config SCHED_HMP + bool "Scheduler support for heterogenous multi-processor systems" + depends on SMP && FAIR_GROUP_SCHED + help + This feature will let the scheduler optimize task placement on + systems made of heterogeneous cpus i.e cpus that differ either + in their instructions per-cycle capability or the maximum + frequency they can attain. + +config SCHED_HMP_CSTATE_AWARE + bool "CPU C-state aware scheduler" + depends on SCHED_HMP + help + This feature will let the HMP scheduler optimize task placement + with CPUs C-state. If this is enabled, scheduler places tasks + onto the shallowest C-state CPU among the most power efficient CPUs. 
+ +config SCHED_CORE_CTL + bool "QTI Core Control" + depends on SMP + help + This options enables the core control functionality in + the scheduler. Core control automatically offline and + online cores based on cpu load and utilization. + + If unsure, say N here. + config CHECKPOINT_RESTORE bool "Checkpoint/restore support" if EXPERT select PROC_CHILDREN @@ -1723,16 +1750,6 @@ config ADVISE_SYSCALLS applications use these syscalls, you can disable this option to save space. -config BPF_UNPRIV_DEFAULT_OFF - bool "Disable unprivileged BPF by default" - depends on BPF_SYSCALL - help - Disables unprivileged BPF by default by setting the corresponding - /proc/sys/kernel/unprivileged_bpf_disabled knob to 2. An admin can - still reenable it by setting it to 0 later on, or permanently - disable it by setting it to 1 (from which no other transition to - 0 is possible anymore). - config USERFAULTFD bool "Enable userfaultfd() system call" select ANON_INODES diff --git a/init/main.c b/init/main.c index 6fce605c6caf..48e8370966ce 100644 --- a/init/main.c +++ b/init/main.c @@ -1016,7 +1016,7 @@ static noinline void __init kernel_init_freeable(void) */ set_cpus_allowed_ptr(current, cpu_all_mask); - cad_pid = get_pid(task_pid(current)); + cad_pid = task_pid(current); smp_prepare_cpus(setup_max_cpus); diff --git a/init/version.c b/init/version.c index 5606341e9efd..fe41a63efed6 100644 --- a/init/version.c +++ b/init/version.c @@ -23,7 +23,9 @@ int version_string(LINUX_VERSION_CODE); #endif struct uts_namespace init_uts_ns = { - .kref = KREF_INIT(2), + .kref = { + .refcount = ATOMIC_INIT(2), + }, .name = { .sysname = UTS_SYSNAME, .nodename = UTS_NODENAME, diff --git a/ipc/shm.c b/ipc/shm.c index 41e5e712da1c..5095eb3a6a6c 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -90,7 +90,6 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); - WARN_ON(ns != shp->ns); if 
(shp->shm_nattch) { shp->shm_perm.mode |= SHM_DEST; @@ -181,43 +180,10 @@ static void shm_rcu_free(struct rcu_head *head) ipc_rcu_free(head); } -/* - * It has to be called with shp locked. - * It must be called before ipc_rmid() - */ -static inline void shm_clist_rm(struct shmid_kernel *shp) +static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) { - struct task_struct *creator; - - /* ensure that shm_creator does not disappear */ - rcu_read_lock(); - - /* - * A concurrent exit_shm may do a list_del_init() as well. - * Just do nothing if exit_shm already did the work - */ - if (!list_empty(&shp->shm_clist)) { - /* - * shp->shm_creator is guaranteed to be valid *only* - * if shp->shm_clist is not empty. - */ - creator = shp->shm_creator; - - task_lock(creator); - /* - * list_del_init() is a nop if the entry was already removed - * from the list. - */ - list_del_init(&shp->shm_clist); - task_unlock(creator); - } - rcu_read_unlock(); -} - -static inline void shm_rmid(struct shmid_kernel *s) -{ - shm_clist_rm(s); - ipc_rmid(&shm_ids(s->ns), &s->shm_perm); + list_del(&s->shm_clist); + ipc_rmid(&shm_ids(ns), &s->shm_perm); } @@ -272,7 +238,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) shm_file = shp->shm_file; shp->shm_file = NULL; ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; - shm_rmid(shp); + shm_rmid(ns, shp); shm_unlock(shp); if (!is_file_hugepages(shm_file)) shmem_lock(shm_file, 0, shp->mlock_user); @@ -293,10 +259,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) * * 2) sysctl kernel.shm_rmid_forced is set to 1. 
*/ -static bool shm_may_destroy(struct shmid_kernel *shp) +static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { return (shp->shm_nattch == 0) && - (shp->ns->shm_rmid_forced || + (ns->shm_rmid_forced || (shp->shm_perm.mode & SHM_DEST)); } @@ -327,7 +293,7 @@ static void shm_close(struct vm_area_struct *vma) shp->shm_lprid = task_tgid_vnr(current); shp->shm_dtim = get_seconds(); shp->shm_nattch--; - if (shm_may_destroy(shp)) + if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); @@ -348,10 +314,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data) * * As shp->* are changed under rwsem, it's safe to skip shp locking. */ - if (!list_empty(&shp->shm_clist)) + if (shp->shm_creator != NULL) return 0; - if (shm_may_destroy(shp)) { + if (shm_may_destroy(ns, shp)) { shm_lock_by_ptr(shp); shm_destroy(ns, shp); } @@ -369,97 +335,48 @@ void shm_destroy_orphaned(struct ipc_namespace *ns) /* Locking assumes this will only be called with task == current */ void exit_shm(struct task_struct *task) { - for (;;) { - struct shmid_kernel *shp; - struct ipc_namespace *ns; - - task_lock(task); - - if (list_empty(&task->sysvshm.shm_clist)) { - task_unlock(task); - break; - } - - shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel, - shm_clist); - - /* - * 1) Get pointer to the ipc namespace. It is worth to say - * that this pointer is guaranteed to be valid because - * shp lifetime is always shorter than namespace lifetime - * in which shp lives. - * We taken task_lock it means that shp won't be freed. - */ - ns = shp->ns; - - /* - * 2) If kernel.shm_rmid_forced is not set then only keep track of - * which shmids are orphaned, so that a later set of the sysctl - * can clean them up. - */ - if (!ns->shm_rmid_forced) - goto unlink_continue; + struct ipc_namespace *ns = task->nsproxy->ipc_ns; + struct shmid_kernel *shp, *n; - /* - * 3) get a reference to the namespace. - * The refcount could be already 0. 
If it is 0, then - * the shm objects will be free by free_ipc_work(). - */ - ns = get_ipc_ns_not_zero(ns); - if (!ns) { -unlink_continue: - list_del_init(&shp->shm_clist); - task_unlock(task); - continue; - } - - /* - * 4) get a reference to shp. - * This cannot fail: shm_clist_rm() is called before - * ipc_rmid(), thus the refcount cannot be 0. - */ - WARN_ON(!ipc_rcu_getref(&shp->shm_perm)); + if (list_empty(&task->sysvshm.shm_clist)) + return; + /* + * If kernel.shm_rmid_forced is not set then only keep track of + * which shmids are orphaned, so that a later set of the sysctl + * can clean them up. + */ + if (!ns->shm_rmid_forced) { + down_read(&shm_ids(ns).rwsem); + list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist) + shp->shm_creator = NULL; /* - * 5) unlink the shm segment from the list of segments - * created by current. - * This must be done last. After unlinking, - * only the refcounts obtained above prevent IPC_RMID - * from destroying the segment or the namespace. + * Only under read lock but we are only called on current + * so no entry on the list will be shared. */ - list_del_init(&shp->shm_clist); - - task_unlock(task); + list_del(&task->sysvshm.shm_clist); + up_read(&shm_ids(ns).rwsem); + return; + } - /* - * 6) we have all references - * Thus lock & if needed destroy shp. - */ - down_write(&shm_ids(ns).rwsem); - shm_lock_by_ptr(shp); - /* - * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's - * safe to call ipc_rcu_putref here - */ - ipc_rcu_putref(&shp->shm_perm, shm_rcu_free); + /* + * Destroy all already created segments, that were not yet mapped, + * and mark any mapped as orphan to cover the sysctl toggling. + * Destroy is skipped if shm_may_destroy() returns false. 
+ */ + down_write(&shm_ids(ns).rwsem); + list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) { + shp->shm_creator = NULL; - if (ipc_valid_object(&shp->shm_perm)) { - if (shm_may_destroy(shp)) - shm_destroy(ns, shp); - else - shm_unlock(shp); - } else { - /* - * Someone else deleted the shp from namespace - * idr/kht while we have waited. - * Just unlock and continue. - */ - shm_unlock(shp); + if (shm_may_destroy(ns, shp)) { + shm_lock_by_ptr(shp); + shm_destroy(ns, shp); } - - up_write(&shm_ids(ns).rwsem); - put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */ } + + /* Remove the list head from any segments still attached. */ + list_del(&task->sysvshm.shm_clist); + up_write(&shm_ids(ns).rwsem); } static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) @@ -690,11 +607,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) goto no_id; } - shp->ns = ns; - - task_lock(current); list_add(&shp->shm_clist, ¤t->sysvshm.shm_clist); - task_unlock(current); /* * shmid gets reported as "inode#" in /proc/pid/maps. 
@@ -1339,8 +1252,7 @@ out_nattch: down_write(&shm_ids(ns).rwsem); shp = shm_lock(ns, shmid); shp->shm_nattch--; - - if (shm_may_destroy(shp)) + if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); diff --git a/kernel/Makefile b/kernel/Makefile index 0cb20e5b5bab..e29fd0db5cfc 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -47,6 +47,9 @@ obj-$(CONFIG_PROFILING) += profile.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += time/ obj-$(CONFIG_FUTEX) += futex.o +ifeq ($(CONFIG_COMPAT),y) +obj-$(CONFIG_FUTEX) += futex_compat.o +endif obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_SMP) += smp.o ifneq ($(CONFIG_SMP),y) @@ -90,6 +93,9 @@ obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o obj-$(CONFIG_TRACEPOINTS) += tracepoint.o obj-$(CONFIG_OOM_SCORE_NOTIFIER) += oom_score_notifier.o obj-$(CONFIG_LATENCYTOP) += latencytop.o +obj-$(CONFIG_BINFMT_ELF) += elfcore.o +obj-$(CONFIG_COMPAT_BINFMT_ELF) += elfcore.o +obj-$(CONFIG_BINFMT_ELF_FDPIC) += elfcore.o obj-$(CONFIG_FUNCTION_TRACER) += trace/ obj-$(CONFIG_TRACING) += trace/ obj-$(CONFIG_TRACE_CLOCK) += trace/ diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 3e7dc17aefbb..01431ef8cf07 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -18,8 +18,7 @@ #include #include -int sysctl_unprivileged_bpf_disabled __read_mostly = - IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 
2 : 0; +int sysctl_unprivileged_bpf_disabled __read_mostly; static LIST_HEAD(bpf_map_types); diff --git a/kernel/cgroup.c b/kernel/cgroup.c index fe08e12683fe..cd3d81961cc2 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -3311,10 +3311,6 @@ static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, struct cgroup *cgrp = kn->priv; int ret; - /* do not accept '\n' to prevent making /proc//cgroup unparsable */ - if (strchr(new_name_str, '\n')) - return -EINVAL; - if (kernfs_type(kn) != KERNFS_DIR) return -ENOTDIR; if (kn->parent != new_parent) diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index f51b762d6886..533e04e75a9c 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -234,7 +234,7 @@ extern struct task_struct *kdb_curr_task(int); #define kdb_do_each_thread(g, p) do_each_thread(g, p) #define kdb_while_each_thread(g, p) while_each_thread(g, p) -#define GFP_KDB (in_dbg_master() ? GFP_ATOMIC : GFP_KERNEL) +#define GFP_KDB (in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL) extern void *debug_kmalloc(size_t size, gfp_t flags); extern void debug_kfree(void *); diff --git a/kernel/elfcore.c b/kernel/elfcore.c new file mode 100644 index 000000000000..a2b29b9bdfcb --- /dev/null +++ b/kernel/elfcore.c @@ -0,0 +1,25 @@ +#include +#include +#include +#include +#include + +Elf_Half __weak elf_core_extra_phdrs(void) +{ + return 0; +} + +int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) +{ + return 1; +} + +int __weak elf_core_write_extra_data(struct coredump_params *cprm) +{ + return 1; +} + +size_t __weak elf_core_extra_data_size(void) +{ + return 0; +} diff --git a/kernel/events/core.c b/kernel/events/core.c index a1fc28e6ea6a..bbd9c080ffd0 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3581,9 +3581,7 @@ find_get_context(struct pmu *pmu, struct task_struct *task, cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); ctx = &cpuctx->ctx; get_ctx(ctx); - raw_spin_lock_irqsave(&ctx->lock, flags); ++ctx->pin_count; - raw_spin_unlock_irqrestore(&ctx->lock, flags); return ctx; } diff --git a/kernel/exit.c b/kernel/exit.c index 9a1cf088c00d..f603506de3b6 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -401,7 +401,7 @@ static void exit_mm(struct task_struct *tsk) struct core_state *core_state; int mm_released; - exit_mm_release(tsk, mm); + mm_release(tsk, mm); if (!mm) return; sync_mm_rss(mm); @@ -717,19 +717,36 @@ void do_exit(long code) */ if (unlikely(tsk->flags & PF_EXITING)) { pr_alert("Fixing recursive fault but reboot is needed!\n"); - futex_exit_recursive(tsk); + /* + * We can do this unlocked here. The futex code uses + * this flag just to verify whether the pi state + * cleanup has been done or not. In the worst case it + * loops once more. We pretend that the cleanup was + * done as there is no way to return. Either the + * OWNER_DIED bit is set by now or we push the blocked + * task into the wait for ever nirwana as well. 
+ */ + tsk->flags |= PF_EXITPIDONE; set_current_state(TASK_UNINTERRUPTIBLE); schedule(); } exit_signals(tsk); /* sets PF_EXITING */ + sched_exit(tsk); schedtune_exit_task(tsk); if (tsk->flags & PF_SU) { su_exit(); } + /* + * tsk->flags are checked in the futex code to protect against + * an exiting task cleaning up the robust pi futexes. + */ + smp_mb(); + raw_spin_unlock_wait(&tsk->pi_lock); + if (unlikely(in_atomic())) { pr_info("note: %s[%d] exited with preempt_count %d\n", current->comm, task_pid_nr(current), @@ -806,6 +823,12 @@ void do_exit(long code) * Make sure we are holding no locks: */ debug_check_no_locks_held(); + /* + * We can do this unlocked here. The futex code uses this flag + * just to verify whether the pi state cleanup has been done + * or not. In the worst case it loops once more. + */ + tsk->flags |= PF_EXITPIDONE; if (tsk->io_context) exit_io_context(tsk); diff --git a/kernel/fork.c b/kernel/fork.c index ce11654fc595..64d2486c3b00 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -929,8 +929,24 @@ static int wait_for_vfork_done(struct task_struct *child, * restoring the old one. . . 
* Eric Biederman 10 January 1998 */ -static void mm_release(struct task_struct *tsk, struct mm_struct *mm) +void mm_release(struct task_struct *tsk, struct mm_struct *mm) { + /* Get rid of any futexes when releasing the mm */ +#ifdef CONFIG_FUTEX + if (unlikely(tsk->robust_list)) { + exit_robust_list(tsk); + tsk->robust_list = NULL; + } +#ifdef CONFIG_COMPAT + if (unlikely(tsk->compat_robust_list)) { + compat_exit_robust_list(tsk); + tsk->compat_robust_list = NULL; + } +#endif + if (unlikely(!list_empty(&tsk->pi_state_list))) + exit_pi_state_list(tsk); +#endif + uprobe_free_utask(tsk); /* Get rid of any cached register state */ @@ -963,18 +979,6 @@ static void mm_release(struct task_struct *tsk, struct mm_struct *mm) complete_vfork_done(tsk); } -void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm) -{ - futex_exit_release(tsk); - mm_release(tsk, mm); -} - -void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm) -{ - futex_exec_release(tsk); - mm_release(tsk, mm); -} - /* * Allocate a new mm structure and copy contents from the * mm structure of the passed in task structure. 
@@ -1551,8 +1555,14 @@ static struct task_struct *copy_process(unsigned long clone_flags, #ifdef CONFIG_BLOCK p->plug = NULL; #endif - futex_init_task(p); - +#ifdef CONFIG_FUTEX + p->robust_list = NULL; +#ifdef CONFIG_COMPAT + p->compat_robust_list = NULL; +#endif + INIT_LIST_HEAD(&p->pi_state_list); + p->pi_state_cache = NULL; +#endif /* * sigaltstack should be cleared when sharing the same VM */ @@ -1740,6 +1750,7 @@ bad_fork_cleanup_audit: bad_fork_cleanup_perf: perf_event_free_task(p); bad_fork_cleanup_policy: + free_task_load_ptrs(p); #ifdef CONFIG_NUMA mpol_put(p->mempolicy); bad_fork_cleanup_threadgroup_lock: diff --git a/kernel/futex.c b/kernel/futex.c index 5990a8f95ae0..e3ef6934b37f 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -44,7 +44,6 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include #include #include #include @@ -172,10 +171,8 @@ * double_lock_hb() and double_unlock_hb(), respectively. 
*/ -#ifdef CONFIG_HAVE_FUTEX_CMPXCHG -#define futex_cmpxchg_enabled 1 -#else -static int __read_mostly futex_cmpxchg_enabled; +#ifndef CONFIG_HAVE_FUTEX_CMPXCHG +int __read_mostly futex_cmpxchg_enabled; #endif /* @@ -331,12 +328,6 @@ static inline bool should_fail_futex(bool fshared) } #endif /* CONFIG_FAIL_FUTEX */ -#ifdef CONFIG_COMPAT -static void compat_exit_robust_list(struct task_struct *curr); -#else -static inline void compat_exit_robust_list(struct task_struct *curr) { } -#endif - static inline void futex_get_mm(union futex_key *key) { atomic_inc(&key->private.mm->mm_count); @@ -825,7 +816,7 @@ static int refill_pi_state_cache(void) return 0; } -static struct futex_pi_state *alloc_pi_state(void) +static struct futex_pi_state * alloc_pi_state(void) { struct futex_pi_state *pi_state = current->pi_state_cache; @@ -835,41 +826,10 @@ static struct futex_pi_state *alloc_pi_state(void) return pi_state; } -static void pi_state_update_owner(struct futex_pi_state *pi_state, - struct task_struct *new_owner) -{ - struct task_struct *old_owner = pi_state->owner; - - lockdep_assert_held(&pi_state->pi_mutex.wait_lock); - - if (old_owner) { - raw_spin_lock(&old_owner->pi_lock); - WARN_ON(list_empty(&pi_state->list)); - list_del_init(&pi_state->list); - raw_spin_unlock(&old_owner->pi_lock); - } - - if (new_owner) { - raw_spin_lock(&new_owner->pi_lock); - WARN_ON(!list_empty(&pi_state->list)); - list_add(&pi_state->list, &new_owner->pi_state_list); - pi_state->owner = new_owner; - raw_spin_unlock(&new_owner->pi_lock); - } -} - -static void get_pi_state(struct futex_pi_state *pi_state) -{ - WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount)); -} - /* - * Drops a reference to the pi_state object and frees or caches it - * when the last reference is gone. - * * Must be called with the hb lock held. 
*/ -static void put_pi_state(struct futex_pi_state *pi_state) +static void free_pi_state(struct futex_pi_state *pi_state) { if (!pi_state) return; @@ -882,10 +842,11 @@ static void put_pi_state(struct futex_pi_state *pi_state) * and has cleaned up the pi_state already */ if (pi_state->owner) { - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); - pi_state_update_owner(pi_state, NULL); - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); - rt_mutex_proxy_unlock(&pi_state->pi_mutex); + raw_spin_lock_irq(&pi_state->owner->pi_lock); + list_del_init(&pi_state->list); + raw_spin_unlock_irq(&pi_state->owner->pi_lock); + + rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); } if (current->pi_state_cache) @@ -906,7 +867,7 @@ static void put_pi_state(struct futex_pi_state *pi_state) * Look up the task based on what TID userspace gave us. * We dont trust it. */ -static struct task_struct *futex_find_get_task(pid_t pid) +static struct task_struct * futex_find_get_task(pid_t pid) { struct task_struct *p; @@ -925,7 +886,7 @@ static struct task_struct *futex_find_get_task(pid_t pid) * Kernel cleans up PI-state, but userspace is likely hosed. * (Robust-futex cleanup is separate and might save the day for userspace.) */ -static void exit_pi_state_list(struct task_struct *curr) +void exit_pi_state_list(struct task_struct *curr) { struct list_head *next, *head = &curr->pi_state_list; struct futex_pi_state *pi_state; @@ -966,11 +927,9 @@ static void exit_pi_state_list(struct task_struct *curr) pi_state->owner = NULL; raw_spin_unlock_irq(&curr->pi_lock); - get_pi_state(pi_state); - spin_unlock(&hb->lock); + rt_mutex_unlock(&pi_state->pi_mutex); - rt_mutex_futex_unlock(&pi_state->pi_mutex); - put_pi_state(pi_state); + spin_unlock(&hb->lock); raw_spin_lock_irq(&curr->pi_lock); } @@ -1024,41 +983,7 @@ static void exit_pi_state_list(struct task_struct *curr) * FUTEX_OWNER_DIED bit. See [4] * * [10] There is no transient state which leaves owner and user space - * TID out of sync. 
Except one error case where the kernel is denied - * write access to the user address, see fixup_pi_state_owner(). - * - * - * Serialization and lifetime rules: - * - * hb->lock: - * - * hb -> futex_q, relation - * futex_q -> pi_state, relation - * - * (cannot be raw because hb can contain arbitrary amount - * of futex_q's) - * - * pi_mutex->wait_lock: - * - * {uval, pi_state} - * - * (and pi_mutex 'obviously') - * - * p->pi_lock: - * - * p->pi_state_list -> pi_state->list, relation - * - * pi_state->refcount: - * - * pi_state lifetime - * - * - * Lock order: - * - * hb->lock - * pi_mutex->wait_lock - * p->pi_lock - * + * TID out of sync. */ /* @@ -1066,12 +991,10 @@ static void exit_pi_state_list(struct task_struct *curr) * the pi_state against the user space value. If correct, attach to * it. */ -static int attach_to_pi_state(u32 __user *uaddr, u32 uval, - struct futex_pi_state *pi_state, +static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, struct futex_pi_state **ps) { pid_t pid = uval & FUTEX_TID_MASK; - int ret, uval2; /* * Userspace might have messed up non-PI and PI futexes [3] @@ -1079,38 +1002,8 @@ static int attach_to_pi_state(u32 __user *uaddr, u32 uval, if (unlikely(!pi_state)) return -EINVAL; - /* - * We get here with hb->lock held, and having found a - * futex_top_waiter(). This means that futex_lock_pi() of said futex_q - * has dropped the hb->lock in between queue_me() and unqueue_me_pi(), - * which in turn means that futex_lock_pi() still has a reference on - * our pi_state. - * - * The waiter holding a reference on @pi_state also protects against - * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi() - * and futex_wait_requeue_pi() as it cannot go to 0 and consequently - * free pi_state before we can take a reference ourselves. - */ WARN_ON(!atomic_read(&pi_state->refcount)); - /* - * Now that we have a pi_state, we can acquire wait_lock - * and do the state validation. 
- */ - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); - - /* - * Since {uval, pi_state} is serialized by wait_lock, and our current - * uval was read without holding it, it can have changed. Verify it - * still is what we expect it to be, otherwise retry the entire - * operation. - */ - if (get_futex_value_locked(&uval2, uaddr)) - goto out_efault; - - if (uval != uval2) - goto out_eagain; - /* * Handle the owner died case: */ @@ -1126,11 +1019,11 @@ static int attach_to_pi_state(u32 __user *uaddr, u32 uval, * is not 0. Inconsistent state. [5] */ if (pid) - goto out_einval; + return -EINVAL; /* * Take a ref on the state and return success. [4] */ - goto out_attach; + goto out_state; } /* @@ -1142,14 +1035,14 @@ static int attach_to_pi_state(u32 __user *uaddr, u32 uval, * Take a ref on the state and return success. [6] */ if (!pid) - goto out_attach; + goto out_state; } else { /* * If the owner died bit is not set, then the pi_state * must have an owner. [7] */ if (!pi_state->owner) - goto out_einval; + return -EINVAL; } /* @@ -1158,124 +1051,19 @@ static int attach_to_pi_state(u32 __user *uaddr, u32 uval, * user space TID. [9/10] */ if (pid != task_pid_vnr(pi_state->owner)) - goto out_einval; - -out_attach: - get_pi_state(pi_state); - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + return -EINVAL; +out_state: + atomic_inc(&pi_state->refcount); *ps = pi_state; return 0; - -out_einval: - ret = -EINVAL; - goto out_error; - -out_eagain: - ret = -EAGAIN; - goto out_error; - -out_efault: - ret = -EFAULT; - goto out_error; - -out_error: - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); - return ret; -} - -/** - * wait_for_owner_exiting - Block until the owner has exited - * @exiting: Pointer to the exiting task - * - * Caller must hold a refcount on @exiting. 
- */ -static void wait_for_owner_exiting(int ret, struct task_struct *exiting) -{ - if (ret != -EBUSY) { - WARN_ON_ONCE(exiting); - return; - } - - if (WARN_ON_ONCE(ret == -EBUSY && !exiting)) - return; - - mutex_lock(&exiting->futex_exit_mutex); - /* - * No point in doing state checking here. If the waiter got here - * while the task was in exec()->exec_futex_release() then it can - * have any FUTEX_STATE_* value when the waiter has acquired the - * mutex. OK, if running, EXITING or DEAD if it reached exit() - * already. Highly unlikely and not a problem. Just one more round - * through the futex maze. - */ - mutex_unlock(&exiting->futex_exit_mutex); - - put_task_struct(exiting); -} - -static int handle_exit_race(u32 __user *uaddr, u32 uval, - struct task_struct *tsk) -{ - u32 uval2; - - /* - * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the - * caller that the alleged owner is busy. - */ - if (tsk && tsk->futex_state != FUTEX_STATE_DEAD) - return -EBUSY; - - /* - * Reread the user space value to handle the following situation: - * - * CPU0 CPU1 - * - * sys_exit() sys_futex() - * do_exit() futex_lock_pi() - * futex_lock_pi_atomic() - * exit_signals(tsk) No waiters: - * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID - * mm_release(tsk) Set waiter bit - * exit_robust_list(tsk) { *uaddr = 0x80000PID; - * Set owner died attach_to_pi_owner() { - * *uaddr = 0xC0000000; tsk = get_task(PID); - * } if (!tsk->flags & PF_EXITING) { - * ... attach(); - * tsk->futex_state = } else { - * FUTEX_STATE_DEAD; if (tsk->futex_state != - * FUTEX_STATE_DEAD) - * return -EAGAIN; - * return -ESRCH; <--- FAIL - * } - * - * Returning ESRCH unconditionally is wrong here because the - * user space value has been changed by the exiting task. - * - * The same logic applies to the case where the exiting task is - * already gone. - */ - if (get_futex_value_locked(&uval2, uaddr)) - return -EFAULT; - - /* If the user space value has changed, try again. 
*/ - if (uval2 != uval) - return -EAGAIN; - - /* - * The exiting task did not have a robust list, the robust list was - * corrupted or the user space value in *uaddr is simply bogus. - * Give up and tell user space. - */ - return -ESRCH; } /* * Lookup the task for the TID provided from user space and attach to * it after doing proper sanity checks. */ -static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, - struct futex_pi_state **ps, - struct task_struct **exiting) +static int attach_to_pi_owner(u32 uval, union futex_key *key, + struct futex_pi_state **ps) { pid_t pid = uval & FUTEX_TID_MASK; struct futex_pi_state *pi_state; @@ -1284,15 +1072,12 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, /* * We are the first waiter - try to look up the real owner and attach * the new pi_state to it, but bail out when TID = 0 [1] - * - * The !pid check is paranoid. None of the call sites should end up - * with pid == 0, but better safe than sorry. Let the caller retry */ if (!pid) - return -EAGAIN; + return -ESRCH; p = futex_find_get_task(pid); if (!p) - return handle_exit_race(uaddr, uval, NULL); + return -ESRCH; if (unlikely(p->flags & PF_KTHREAD)) { put_task_struct(p); @@ -1300,41 +1085,27 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, } /* - * We need to look at the task state to figure out, whether the - * task is exiting. To protect against the change of the task state - * in futex_exit_release(), we do this protected by p->pi_lock: + * We need to look at the task state flags to figure out, + * whether the task is exiting. To protect against the do_exit + * change of the task flags, we do this protected by + * p->pi_lock: */ raw_spin_lock_irq(&p->pi_lock); - if (unlikely(p->futex_state != FUTEX_STATE_OK)) { + if (unlikely(p->flags & PF_EXITING)) { /* - * The task is on the way out. 
When the futex state is - * FUTEX_STATE_DEAD, we know that the task has finished - * the cleanup: + * The task is on the way out. When PF_EXITPIDONE is + * set, we know that the task has finished the + * cleanup: */ - int ret = handle_exit_race(uaddr, uval, p); + int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN; raw_spin_unlock_irq(&p->pi_lock); - /* - * If the owner task is between FUTEX_STATE_EXITING and - * FUTEX_STATE_DEAD then store the task pointer and keep - * the reference on the task struct. The calling code will - * drop all locks, wait for the task to reach - * FUTEX_STATE_DEAD and then drop the refcount. This is - * required to prevent a live lock when the current task - * preempted the exiting task between the two states. - */ - if (ret == -EBUSY) - *exiting = p; - else - put_task_struct(p); + put_task_struct(p); return ret; } /* * No existing pi state. First waiter. [2] - * - * This creates pi_state, we have hb->lock held, this means nothing can - * observe this state, wait_lock is irrelevant. */ pi_state = alloc_pi_state(); @@ -1359,10 +1130,8 @@ static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key, return 0; } -static int lookup_pi_state(u32 __user *uaddr, u32 uval, - struct futex_hash_bucket *hb, - union futex_key *key, struct futex_pi_state **ps, - struct task_struct **exiting) +static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, + union futex_key *key, struct futex_pi_state **ps) { struct futex_q *match = futex_top_waiter(hb, key); @@ -1371,13 +1140,13 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval, * attach to the pi_state when the validation succeeds. */ if (match) - return attach_to_pi_state(uaddr, uval, match->pi_state, ps); + return attach_to_pi_state(uval, match->pi_state, ps); /* * We are the first waiter - try to look up the owner based on * @uval and attach to it. 
*/ - return attach_to_pi_owner(uaddr, uval, key, ps, exiting); + return attach_to_pi_owner(uval, key, ps); } static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) @@ -1390,7 +1159,7 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))) return -EFAULT; - /* If user space value changed, let the caller retry */ + /*If user space value changed, let the caller retry */ return curval != uval ? -EAGAIN : 0; } @@ -1403,8 +1172,6 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) * lookup * @task: the task to perform the atomic lock work for. This will * be "current" except in the case of requeue pi. - * @exiting: Pointer to store the task pointer of the owner task - * which is in the middle of exiting * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) * * Return: @@ -1413,17 +1180,11 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) * <0 - error * * The hb->lock and futex_key refs shall be held by the caller. - * - * @exiting is only set when the return value is -EBUSY. If so, this holds - * a refcount on the exiting task on return and the caller needs to drop it - * after waiting for the exit to complete. */ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps, - struct task_struct *task, - struct task_struct **exiting, - int set_waiters) + struct task_struct *task, int set_waiters) { u32 uval, newval, vpid = task_pid_vnr(task); struct futex_q *match; @@ -1454,7 +1215,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, */ match = futex_top_waiter(hb, key); if (match) - return attach_to_pi_state(uaddr, uval, match->pi_state, ps); + return attach_to_pi_state(uval, match->pi_state, ps); /* * No waiter and user TID is 0. 
We are here because the @@ -1493,7 +1254,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, * attach to the owner. If that fails, no harm done, we only * set the FUTEX_WAITERS bit in the user space variable. */ - return attach_to_pi_owner(uaddr, newval, key, ps, exiting); + return attach_to_pi_owner(uval, key, ps); } /** @@ -1544,35 +1305,41 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q) q->lock_ptr = NULL; } -/* - * Caller must hold a reference on @pi_state. - */ -static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state) +static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this, + struct futex_hash_bucket *hb) { - u32 uninitialized_var(curval), newval; struct task_struct *new_owner; - bool deboost = false; + struct futex_pi_state *pi_state = this->pi_state; + u32 uninitialized_var(curval), newval; WAKE_Q(wake_q); + bool deboost; int ret = 0; + if (!pi_state) + return -EINVAL; + + /* + * If current does not own the pi_state then the futex is + * inconsistent and user space fiddled with the futex value. + */ + if (pi_state->owner != current) + return -EINVAL; + + raw_spin_lock(&pi_state->pi_mutex.wait_lock); new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); - if (WARN_ON_ONCE(!new_owner)) { - /* - * As per the comment in futex_unlock_pi() this should not happen. - * - * When this happens, give up our locks and try again, giving - * the futex_lock_pi() instance time to complete, either by - * waiting on the rtmutex or removing itself from the futex - * queue. - */ - ret = -EAGAIN; - goto out_unlock; - } /* - * We pass it to the next owner. The WAITERS bit is always kept - * enabled while there is PI state around. We cleanup the owner - * died bit, because we are the owner. + * It is possible that the next waiter (the one that brought + * this owner to the kernel) timed out and is no longer + * waiting on the lock. 
+ */ + if (!new_owner) + new_owner = this->task; + + /* + * We pass it to the next owner. The WAITERS bit is always + * kept enabled while there is PI state around. We cleanup the + * owner died bit, because we are the owner. */ newval = FUTEX_WAITERS | task_pid_vnr(new_owner); @@ -1581,7 +1348,6 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) { ret = -EFAULT; - } else if (curval != uval) { /* * If a unconditional UNLOCK_PI operation (user space did not @@ -1594,26 +1360,38 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ else ret = -EINVAL; } - - if (!ret) { - /* - * This is a point of no return; once we modified the uval - * there is no going back and subsequent operations must - * not fail. - */ - pi_state_update_owner(pi_state, new_owner); - deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); + if (ret) { + raw_spin_unlock(&pi_state->pi_mutex.wait_lock); + return ret; } -out_unlock: - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + raw_spin_lock_irq(&pi_state->owner->pi_lock); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + raw_spin_unlock_irq(&pi_state->owner->pi_lock); + + raw_spin_lock_irq(&new_owner->pi_lock); + WARN_ON(!list_empty(&pi_state->list)); + list_add(&pi_state->list, &new_owner->pi_state_list); + pi_state->owner = new_owner; + raw_spin_unlock_irq(&new_owner->pi_lock); + + raw_spin_unlock(&pi_state->pi_mutex.wait_lock); - if (deboost) { - wake_up_q(&wake_q); + deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); + + /* + * First unlock HB so the waiter does not spin on it once he got woken + * up. Second wake up the waiter before the priority is adjusted. If we + * deboost first (and lose our higher priority), then the task might get + * scheduled away before the wake up can take place. 
+ */ + spin_unlock(&hb->lock); + wake_up_q(&wake_q); + if (deboost) rt_mutex_adjust_prio(current); - } - return ret; + return 0; } /* @@ -1902,8 +1680,6 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, * @key1: the from futex key * @key2: the to futex key * @ps: address to store the pi_state pointer - * @exiting: Pointer to store the task pointer of the owner task - * which is in the middle of exiting * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) * * Try and get the lock on behalf of the top waiter if we can do it atomically. @@ -1911,20 +1687,16 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. * hb1 and hb2 must be held by the caller. * - * @exiting is only set when the return value is -EBUSY. If so, this holds - * a refcount on the exiting task on return and the caller needs to drop it - * after waiting for the exit to complete. - * * Return: * 0 - failed to acquire the lock atomically; * >0 - acquired the lock, return value is vpid of the top_waiter * <0 - error */ -static int -futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, - struct futex_hash_bucket *hb2, union futex_key *key1, - union futex_key *key2, struct futex_pi_state **ps, - struct task_struct **exiting, int set_waiters) +static int futex_proxy_trylock_atomic(u32 __user *pifutex, + struct futex_hash_bucket *hb1, + struct futex_hash_bucket *hb2, + union futex_key *key1, union futex_key *key2, + struct futex_pi_state **ps, int set_waiters) { struct futex_q *top_waiter = NULL; u32 curval; @@ -1961,7 +1733,7 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, */ vpid = task_pid_vnr(top_waiter->task); ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, - exiting, set_waiters); + set_waiters); if (ret == 1) { requeue_pi_wake_futex(top_waiter, key2, hb2); return vpid; @@ -2081,8 +1853,6 @@ 
retry_private: } if (requeue_pi && (task_count - nr_wake < nr_requeue)) { - struct task_struct *exiting = NULL; - /* * Attempt to acquire uaddr2 and wake the top waiter. If we * intend to requeue waiters, force setting the FUTEX_WAITERS @@ -2090,8 +1860,7 @@ retry_private: * faults rather in the requeue loop below. */ ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, - &key2, &pi_state, - &exiting, nr_requeue); + &key2, &pi_state, nr_requeue); /* * At this point the top_waiter has either taken uaddr2 or is @@ -2115,15 +1884,14 @@ retry_private: * rereading and handing potential crap to * lookup_pi_state. */ - ret = lookup_pi_state(uaddr2, ret, hb2, &key2, - &pi_state, &exiting); + ret = lookup_pi_state(ret, hb2, &key2, &pi_state); } switch (ret) { case 0: break; case -EFAULT: - put_pi_state(pi_state); + free_pi_state(pi_state); pi_state = NULL; double_unlock_hb(hb1, hb2); hb_waiters_dec(hb2); @@ -2133,26 +1901,19 @@ retry_private: if (!ret) goto retry; goto out; - case -EBUSY: case -EAGAIN: /* * Two reasons for this: - * - EBUSY: Owner is exiting and we just wait for the + * - Owner is exiting and we just wait for the * exit to complete. - * - EAGAIN: The user space value changed. + * - The user space value changed. */ - put_pi_state(pi_state); + free_pi_state(pi_state); pi_state = NULL; double_unlock_hb(hb1, hb2); hb_waiters_dec(hb2); put_futex_key(&key2); put_futex_key(&key1); - /* - * Handle the case where the owner is in the middle of - * exiting. Wait for the exit to complete otherwise - * this task might loop forever, aka. live lock. - */ - wait_for_owner_exiting(ret, exiting); cond_resched(); goto retry; default: @@ -2203,7 +1964,7 @@ retry_private: */ if (requeue_pi) { /* Prepare the waiter to take the rt_mutex. 
*/ - get_pi_state(pi_state); + atomic_inc(&pi_state->refcount); this->pi_state = pi_state; ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, this->rt_waiter, @@ -2216,7 +1977,7 @@ retry_private: } else if (ret) { /* -EDEADLK */ this->pi_state = NULL; - put_pi_state(pi_state); + free_pi_state(pi_state); goto out_unlock; } } @@ -2225,7 +1986,7 @@ retry_private: } out_unlock: - put_pi_state(pi_state); + free_pi_state(pi_state); double_unlock_hb(hb1, hb2); wake_up_q(&wake_q); hb_waiters_dec(hb2); @@ -2279,7 +2040,20 @@ queue_unlock(struct futex_hash_bucket *hb) hb_waiters_dec(hb); } -static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) +/** + * queue_me() - Enqueue the futex_q on the futex_hash_bucket + * @q: The futex_q to enqueue + * @hb: The destination hash bucket + * + * The hb->lock must be held by the caller, and is released here. A call to + * queue_me() is typically paired with exactly one call to unqueue_me(). The + * exceptions involve the PI related operations, which may use unqueue_me_pi() + * or nothing if the unqueue is done as part of the wake process and the unqueue + * state is implicit in the state of woken task (see futex_wait_requeue_pi() for + * an example). + */ +static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) + __releases(&hb->lock) { int prio; @@ -2296,24 +2070,6 @@ static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) plist_node_init(&q->list, prio); plist_add(&q->list, &hb->chain); q->task = current; -} - -/** - * queue_me() - Enqueue the futex_q on the futex_hash_bucket - * @q: The futex_q to enqueue - * @hb: The destination hash bucket - * - * The hb->lock must be held by the caller, and is released here. A call to - * queue_me() is typically paired with exactly one call to unqueue_me(). 
The - * exceptions involve the PI related operations, which may use unqueue_me_pi() - * or nothing if the unqueue is done as part of the wake process and the unqueue - * state is implicit in the state of woken task (see futex_wait_requeue_pi() for - * an example). - */ -static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) - __releases(&hb->lock) -{ - __queue_me(q, hb); spin_unlock(&hb->lock); } @@ -2383,97 +2139,53 @@ static void unqueue_me_pi(struct futex_q *q) __unqueue_futex(q); BUG_ON(!q->pi_state); - put_pi_state(q->pi_state); + free_pi_state(q->pi_state); q->pi_state = NULL; spin_unlock(q->lock_ptr); } -static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, - struct task_struct *argowner) +/* + * Fixup the pi_state owner with the new owner. + * + * Must be called with hash bucket lock held and mm->sem held for non + * private futexes. + */ +static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, + struct task_struct *newowner) { + u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; struct futex_pi_state *pi_state = q->pi_state; - struct task_struct *oldowner, *newowner; - u32 uval, curval, newval, newtid; - int err = 0; + struct task_struct *oldowner = pi_state->owner; + u32 uval, uninitialized_var(curval), newval; + int ret; + + /* Owner died? */ + if (!pi_state->owner) + newtid |= FUTEX_OWNER_DIED; - oldowner = pi_state->owner; /* - * We are here because either: - * - * - we stole the lock and pi_state->owner needs updating to reflect - * that (@argowner == current), - * - * or: - * - * - someone stole our lock and we need to fix things to point to the - * new owner (@argowner == NULL). - * - * Either way, we have to replace the TID in the user space variable. + * We are here either because we stole the rtmutex from the + * previous highest priority waiter or we are the highest priority + * waiter but failed to get the rtmutex the first time. 
+ * We have to replace the newowner TID in the user space variable. * This must be atomic as we have to preserve the owner died bit here. * * Note: We write the user space value _before_ changing the pi_state * because we can fault here. Imagine swapped out pages or a fork * that marked all the anonymous memory readonly for cow. * - * Modifying pi_state _before_ the user space value would leave the - * pi_state in an inconsistent state when we fault here, because we - * need to drop the locks to handle the fault. This might be observed - * in the PID check in lookup_pi_state. + * Modifying pi_state _before_ the user space value would + * leave the pi_state in an inconsistent state when we fault + * here, because we need to drop the hash bucket lock to + * handle the fault. This might be observed in the PID check + * in lookup_pi_state. */ retry: - if (!argowner) { - if (oldowner != current) { - /* - * We raced against a concurrent self; things are - * already fixed up. Nothing to do. - */ - return 0; - } - - if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) { - /* We got the lock. pi_state is correct. Tell caller */ - return 1; - } - - /* - * The trylock just failed, so either there is an owner or - * there is a higher priority waiter than this one. - */ - newowner = rt_mutex_owner(&pi_state->pi_mutex); - /* - * If the higher priority waiter has not yet taken over the - * rtmutex then newowner is NULL. We can't return here with - * that state because it's inconsistent vs. the user space - * state. So drop the locks and try again. It's a valid - * situation and not any different from the other retry - * conditions. - */ - if (unlikely(!newowner)) { - err = -EAGAIN; - goto handle_fault; - } - } else { - WARN_ON_ONCE(argowner != current); - if (oldowner == current) { - /* - * We raced against a concurrent self; things are - * already fixed up. Nothing to do. 
- */ - return 1; - } - newowner = argowner; - } - - newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; - /* Owner died? */ - if (!pi_state->owner) - newtid |= FUTEX_OWNER_DIED; - if (get_futex_value_locked(&uval, uaddr)) goto handle_fault; - for (;;) { + while (1) { newval = (uval & FUTEX_OWNER_DIED) | newtid; if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) @@ -2487,75 +2199,48 @@ retry: * We fixed up user space. Now we need to fix the pi_state * itself. */ - pi_state_update_owner(pi_state, newowner); + if (pi_state->owner != NULL) { + raw_spin_lock_irq(&pi_state->owner->pi_lock); + WARN_ON(list_empty(&pi_state->list)); + list_del_init(&pi_state->list); + raw_spin_unlock_irq(&pi_state->owner->pi_lock); + } - return argowner == current; + pi_state->owner = newowner; + + raw_spin_lock_irq(&newowner->pi_lock); + WARN_ON(!list_empty(&pi_state->list)); + list_add(&pi_state->list, &newowner->pi_state_list); + raw_spin_unlock_irq(&newowner->pi_lock); + return 0; /* - * To handle the page fault we need to drop the locks here. That gives - * the other task (either the highest priority waiter itself or the - * task which stole the rtmutex) the chance to try the fixup of the - * pi_state. So once we are back from handling the fault we need to - * check the pi_state after reacquiring the locks and before trying to - * do another fixup. When the fixup has been done already we simply - * return. - * - * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely - * drop hb->lock since the caller owns the hb -> futex_q relation. - * Dropping the pi_mutex->wait_lock requires the state revalidate. + * To handle the page fault we need to drop the hash bucket + * lock here. That gives the other task (either the highest priority + * waiter itself or the task which stole the rtmutex) the + * chance to try the fixup of the pi_state. 
So once we are + * back from handling the fault we need to check the pi_state + * after reacquiring the hash bucket lock and before trying to + * do another fixup. When the fixup has been done already we + * simply return. */ handle_fault: - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); spin_unlock(q->lock_ptr); - err = fault_in_user_writeable(uaddr); + ret = fault_in_user_writeable(uaddr); spin_lock(q->lock_ptr); - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); /* * Check if someone else fixed it for us: */ if (pi_state->owner != oldowner) - return argowner == current; - - /* Retry if err was -EAGAIN or the fault in succeeded */ - if (!err) - goto retry; - - /* - * fault_in_user_writeable() failed so user state is immutable. At - * best we can make the kernel state consistent but user state will - * be most likely hosed and any subsequent unlock operation will be - * rejected due to PI futex rule [10]. - * - * Ensure that the rtmutex owner is also the pi_state owner despite - * the user space value claiming something different. There is no - * point in unlocking the rtmutex if current is the owner as it - * would need to wait until the next waiter has taken the rtmutex - * to guarantee consistent state. Keep it simple. Userspace asked - * for this wreckaged state. - * - * The rtmutex has an owner - either current or some other - * task. See the EAGAIN loop above. 
- */ - pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex)); - - return err; -} - -static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, - struct task_struct *argowner) -{ - struct futex_pi_state *pi_state = q->pi_state; - int ret; + return 0; - lockdep_assert_held(q->lock_ptr); + if (ret) + return ret; - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); - ret = __fixup_pi_state_owner(uaddr, q, argowner); - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); - return ret; + goto retry; } static long futex_wait_restart(struct restart_block *restart); @@ -2577,39 +2262,60 @@ static long futex_wait_restart(struct restart_block *restart); */ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) { + struct task_struct *owner; + int ret = 0; + if (locked) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case: - * - * Speculative pi_state->owner read (we don't hold wait_lock); - * since we own the lock pi_state->owner == current is the - * stable state, anything else needs more attention. */ if (q->pi_state->owner != current) - return fixup_pi_state_owner(uaddr, q, current); - return 1; + ret = fixup_pi_state_owner(uaddr, q, current); + goto out; } /* - * If we didn't get the lock; check if anybody stole it from us. In - * that case, we need to fix up the uval to point to them instead of - * us, otherwise bad things happen. [10] - * - * Another speculative read; pi_state->owner == current is unstable - * but needs our attention. + * Catch the rare case, where the lock was released when we were on the + * way back before we locked the hash bucket. */ - if (q->pi_state->owner == current) - return fixup_pi_state_owner(uaddr, q, NULL); + if (q->pi_state->owner == current) { + /* + * Try to get the rt_mutex now. This might fail as some other + * task acquired the rt_mutex after we removed ourself from the + * rt_mutex waiters list. 
+ */ + if (rt_mutex_trylock(&q->pi_state->pi_mutex)) { + locked = 1; + goto out; + } + + /* + * pi_state is incorrect, some other task did a lock steal and + * we returned due to timeout or signal without taking the + * rt_mutex. Too late. + */ + raw_spin_lock(&q->pi_state->pi_mutex.wait_lock); + owner = rt_mutex_owner(&q->pi_state->pi_mutex); + if (!owner) + owner = rt_mutex_next_owner(&q->pi_state->pi_mutex); + raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock); + ret = fixup_pi_state_owner(uaddr, q, owner); + goto out; + } /* * Paranoia check. If we did not take the lock, then we should not be - * the owner of the rt_mutex. Warn and establish consistent state. + * the owner of the rt_mutex. */ - if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current)) - return fixup_pi_state_owner(uaddr, q, current); + if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) + printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p " + "pi-state %p\n", ret, + q->pi_state->pi_mutex.owner, + q->pi_state->owner); - return 0; +out: + return ret ? ret : locked; } /** @@ -2830,8 +2536,6 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int trylock) { struct hrtimer_sleeper timeout, *to = NULL; - struct task_struct *exiting = NULL; - struct rt_mutex_waiter rt_waiter; struct futex_hash_bucket *hb; struct futex_q q = futex_q_init; int res, ret; @@ -2855,8 +2559,7 @@ retry: retry_private: hb = queue_lock(&q); - ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, - &exiting, 0); + ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0); if (unlikely(ret)) { /* * Atomic work succeeded and we got the lock, @@ -2869,22 +2572,15 @@ retry_private: goto out_unlock_put_key; case -EFAULT: goto uaddr_faulted; - case -EBUSY: case -EAGAIN: /* * Two reasons for this: - * - EBUSY: Task is exiting and we just wait for the + * - Task is exiting and we just wait for the * exit to complete. - * - EAGAIN: The user space value changed. 
+ * - The user space value changed. */ queue_unlock(hb); put_futex_key(&q.key); - /* - * Handle the case where the owner is in the middle of - * exiting. Wait for the exit to complete otherwise - * this task might loop forever, aka. live lock. - */ - wait_for_owner_exiting(ret, exiting); cond_resched(); goto retry; default: @@ -2892,51 +2588,24 @@ retry_private: } } - WARN_ON(!q.pi_state); - /* * Only actually queue now that the atomic ops are done: */ - __queue_me(&q, hb); - - if (trylock) { - ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex); - /* Fixup the trylock return value: */ - ret = ret ? 0 : -EWOULDBLOCK; - goto no_block; - } + queue_me(&q, hb); + WARN_ON(!q.pi_state); /* - * We must add ourselves to the rt_mutex waitlist while holding hb->lock - * such that the hb and rt_mutex wait lists match. + * Block on the PI mutex: */ - rt_mutex_init_waiter(&rt_waiter); - ret = rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); - if (ret) { - if (ret == 1) - ret = 0; - - goto no_block; + if (!trylock) { + ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to); + } else { + ret = rt_mutex_trylock(&q.pi_state->pi_mutex); + /* Fixup the trylock return value: */ + ret = ret ? 0 : -EWOULDBLOCK; } - spin_unlock(q.lock_ptr); - - if (unlikely(to)) - hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); - - ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); - spin_lock(q.lock_ptr); - /* - * If we failed to acquire the lock (signal/timeout), we must - * first acquire the hb->lock before removing the lock from the - * rt_mutex waitqueue, such that we can keep the hb and rt_mutex - * wait lists consistent. - */ - if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter)) - ret = 0; - -no_block: /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. @@ -2949,6 +2618,13 @@ no_block: if (res) ret = (res < 0) ? 
res : 0; + /* + * If fixup_owner() faulted and was unable to handle the fault, unlock + * it and return the fault to userspace. + */ + if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) + rt_mutex_unlock(&q.pi_state->pi_mutex); + /* Unqueue and drop the lock */ unqueue_me_pi(&q); @@ -2960,10 +2636,8 @@ out_unlock_put_key: out_put_key: put_futex_key(&q.key); out: - if (to) { - hrtimer_cancel(&to->timer); + if (to) destroy_hrtimer_on_stack(&to->timer); - } return ret != -EINTR ? ret : -ERESTARTNOINTR; uaddr_faulted: @@ -3016,39 +2690,10 @@ retry: */ match = futex_top_waiter(hb, &key); if (match) { - struct futex_pi_state *pi_state = match->pi_state; - - ret = -EINVAL; - if (!pi_state) - goto out_unlock; - - /* - * If current does not own the pi_state then the futex is - * inconsistent and user space fiddled with the futex value. - */ - if (pi_state->owner != current) - goto out_unlock; - - get_pi_state(pi_state); - /* - * Since modifying the wait_list is done while holding both - * hb->lock and wait_lock, holding either is sufficient to - * observe it. - * - * By taking wait_lock while still holding hb->lock, we ensure - * there is no point where we hold neither; and therefore - * wake_futex_pi() must observe a state consistent with what we - * observed. - */ - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); - spin_unlock(&hb->lock); - - ret = wake_futex_pi(uaddr, uval, pi_state); - - put_pi_state(pi_state); - + ret = wake_futex_pi(uaddr, uval, match, hb); /* - * Success, we're done! No tricky corner cases. + * In case of success wake_futex_pi dropped the hash + * bucket lock. */ if (!ret) goto out_putkey; @@ -3063,6 +2708,7 @@ retry: * setting the FUTEX_WAITERS bit. Try again. */ if (ret == -EAGAIN) { + spin_unlock(&hb->lock); put_futex_key(&key); goto retry; } @@ -3070,7 +2716,7 @@ retry: * wake_futex_pi has detected invalid state. Tell user * space. 
*/ - goto out_putkey; + goto out_unlock; } /* @@ -3080,10 +2726,8 @@ retry: * preserve the WAITERS bit not the OWNER_DIED one. We are the * owner. */ - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) { - spin_unlock(&hb->lock); + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) goto pi_faulted; - } /* * If uval has changed, let user space handle it. @@ -3097,6 +2741,7 @@ out_putkey: return ret; pi_faulted: + spin_unlock(&hb->lock); put_futex_key(&key); ret = fault_in_user_writeable(uaddr); @@ -3226,7 +2871,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ - rt_mutex_init_waiter(&rt_waiter); + debug_rt_mutex_init_waiter(&rt_waiter); + RB_CLEAR_NODE(&rt_waiter.pi_tree_entry); + RB_CLEAR_NODE(&rt_waiter.tree_entry); + rt_waiter.task = NULL; ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); if (unlikely(ret != 0)) @@ -3281,17 +2929,14 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (q.pi_state && (q.pi_state->owner != current)) { spin_lock(q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current); + if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) + rt_mutex_unlock(&q.pi_state->pi_mutex); /* * Drop the reference to the pi state which * the requeue_pi() code acquired for us. */ - put_pi_state(q.pi_state); + free_pi_state(q.pi_state); spin_unlock(q.lock_ptr); - /* - * Adjust the return value. It's either -EFAULT or - * success (1) but the caller expects 0 for success. - */ - ret = ret < 0 ? ret : 0; } } else { struct rt_mutex *pi_mutex; @@ -3322,6 +2967,14 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, if (res) ret = (res < 0) ? res : 0; + /* + * If fixup_pi_state_owner() faulted and was unable to handle + * the fault, unlock the rt_mutex and return the fault to + * userspace. 
+ */ + if (ret && rt_mutex_owner(pi_mutex) == current) + rt_mutex_unlock(pi_mutex); + /* Unqueue and drop the lock. */ unqueue_me_pi(&q); } @@ -3435,7 +3088,7 @@ err_unlock: * Process a futex-list entry, check whether it's owned by the * dying task, and do notification if so: */ -static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) +int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) { u32 uval, uninitialized_var(nval), mval; @@ -3510,7 +3163,7 @@ static inline int fetch_robust_entry(struct robust_list __user **entry, * * We silently return on any sign of list-walking problem. */ -static void exit_robust_list(struct task_struct *curr) +void exit_robust_list(struct task_struct *curr) { struct robust_list_head __user *head = curr->robust_list; struct robust_list __user *entry, *next_entry, *pending; @@ -3573,114 +3226,6 @@ static void exit_robust_list(struct task_struct *curr) curr, pip); } -static void futex_cleanup(struct task_struct *tsk) -{ - if (unlikely(tsk->robust_list)) { - exit_robust_list(tsk); - tsk->robust_list = NULL; - } - -#ifdef CONFIG_COMPAT - if (unlikely(tsk->compat_robust_list)) { - compat_exit_robust_list(tsk); - tsk->compat_robust_list = NULL; - } -#endif - - if (unlikely(!list_empty(&tsk->pi_state_list))) - exit_pi_state_list(tsk); -} - -/** - * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD - * @tsk: task to set the state on - * - * Set the futex exit state of the task lockless. The futex waiter code - * observes that state when a task is exiting and loops until the task has - * actually finished the futex cleanup. The worst case for this is that the - * waiter runs through the wait loop until the state becomes visible. - * - * This is called from the recursive fault handling path in do_exit(). - * - * This is best effort. Either the futex exit code has run already or - * not. If the OWNER_DIED bit has been set on the futex then the waiter can - * take it over. 
If not, the problem is pushed back to user space. If the - * futex exit code did not run yet, then an already queued waiter might - * block forever, but there is nothing which can be done about that. - */ -void futex_exit_recursive(struct task_struct *tsk) -{ - /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */ - if (tsk->futex_state == FUTEX_STATE_EXITING) - mutex_unlock(&tsk->futex_exit_mutex); - tsk->futex_state = FUTEX_STATE_DEAD; -} - -static void futex_cleanup_begin(struct task_struct *tsk) -{ - /* - * Prevent various race issues against a concurrent incoming waiter - * including live locks by forcing the waiter to block on - * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in - * attach_to_pi_owner(). - */ - mutex_lock(&tsk->futex_exit_mutex); - - /* - * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock. - * - * This ensures that all subsequent checks of tsk->futex_state in - * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with - * tsk->pi_lock held. - * - * It guarantees also that a pi_state which was queued right before - * the state change under tsk->pi_lock by a concurrent waiter must - * be observed in exit_pi_state_list(). - */ - raw_spin_lock_irq(&tsk->pi_lock); - tsk->futex_state = FUTEX_STATE_EXITING; - raw_spin_unlock_irq(&tsk->pi_lock); -} - -static void futex_cleanup_end(struct task_struct *tsk, int state) -{ - /* - * Lockless store. The only side effect is that an observer might - * take another loop until it becomes visible. - */ - tsk->futex_state = state; - /* - * Drop the exit protection. This unblocks waiters which observed - * FUTEX_STATE_EXITING to reevaluate the state. - */ - mutex_unlock(&tsk->futex_exit_mutex); -} - -void futex_exec_release(struct task_struct *tsk) -{ - /* - * The state handling is done for consistency, but in the case of - * exec() there is no way to prevent futher damage as the PID stays - * the same. 
But for the unlikely and arguably buggy case that a - * futex is held on exec(), this provides at least as much state - * consistency protection which is possible. - */ - futex_cleanup_begin(tsk); - futex_cleanup(tsk); - /* - * Reset the state to FUTEX_STATE_OK. The task is alive and about - * exec a new binary. - */ - futex_cleanup_end(tsk, FUTEX_STATE_OK); -} - -void futex_exit_release(struct task_struct *tsk) -{ - futex_cleanup_begin(tsk); - futex_cleanup(tsk); - futex_cleanup_end(tsk, FUTEX_STATE_DEAD); -} - long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3) { @@ -3773,192 +3318,6 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); } -#ifdef CONFIG_COMPAT -/* - * Fetch a robust-list pointer. Bit 0 signals PI futexes: - */ -static inline int -compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, - compat_uptr_t __user *head, unsigned int *pi) -{ - if (get_user(*uentry, head)) - return -EFAULT; - - *entry = compat_ptr((*uentry) & ~1); - *pi = (unsigned int)(*uentry) & 1; - - return 0; -} - -static void __user *futex_uaddr(struct robust_list __user *entry, - compat_long_t futex_offset) -{ - compat_uptr_t base = ptr_to_compat(entry); - void __user *uaddr = compat_ptr(base + futex_offset); - - return uaddr; -} - -/* - * Walk curr->robust_list (very carefully, it's a userspace list!) - * and mark any locks found there dead, and notify any waiters. - * - * We silently return on any sign of list-walking problem. 
- */ -void compat_exit_robust_list(struct task_struct *curr) -{ - struct compat_robust_list_head __user *head = curr->compat_robust_list; - struct robust_list __user *entry, *next_entry, *pending; - unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; - unsigned int uninitialized_var(next_pi); - compat_uptr_t uentry, next_uentry, upending; - compat_long_t futex_offset; - int rc; - - if (!futex_cmpxchg_enabled) - return; - - /* - * Fetch the list head (which was registered earlier, via - * sys_set_robust_list()): - */ - if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) - return; - /* - * Fetch the relative futex offset: - */ - if (get_user(futex_offset, &head->futex_offset)) - return; - /* - * Fetch any possibly pending lock-add first, and handle it - * if it exists: - */ - if (compat_fetch_robust_entry(&upending, &pending, - &head->list_op_pending, &pip)) - return; - - next_entry = NULL; /* avoid warning with gcc */ - while (entry != (struct robust_list __user *) &head->list) { - /* - * Fetch the next entry in the list before calling - * handle_futex_death: - */ - rc = compat_fetch_robust_entry(&next_uentry, &next_entry, - (compat_uptr_t __user *)&entry->next, &next_pi); - /* - * A pending lock might already be on the list, so - * dont process it twice: - */ - if (entry != pending) { - void __user *uaddr = futex_uaddr(entry, futex_offset); - - if (handle_futex_death(uaddr, curr, pi)) - return; - } - if (rc) - return; - uentry = next_uentry; - entry = next_entry; - pi = next_pi; - /* - * Avoid excessively long or circular lists: - */ - if (!--limit) - break; - - cond_resched(); - } - if (pending) { - void __user *uaddr = futex_uaddr(pending, futex_offset); - - handle_futex_death(uaddr, curr, pip); - } -} - -COMPAT_SYSCALL_DEFINE2(set_robust_list, - struct compat_robust_list_head __user *, head, - compat_size_t, len) -{ - if (!futex_cmpxchg_enabled) - return -ENOSYS; - - if (unlikely(len != sizeof(*head))) - return -EINVAL; - - 
current->compat_robust_list = head; - - return 0; -} - -COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, - compat_uptr_t __user *, head_ptr, - compat_size_t __user *, len_ptr) -{ - struct compat_robust_list_head __user *head; - unsigned long ret; - struct task_struct *p; - - if (!futex_cmpxchg_enabled) - return -ENOSYS; - - rcu_read_lock(); - - ret = -ESRCH; - if (!pid) - p = current; - else { - p = find_task_by_vpid(pid); - if (!p) - goto err_unlock; - } - - ret = -EPERM; - if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) - goto err_unlock; - - head = p->compat_robust_list; - rcu_read_unlock(); - - if (put_user(sizeof(*head), len_ptr)) - return -EFAULT; - return put_user(ptr_to_compat(head), head_ptr); - -err_unlock: - rcu_read_unlock(); - - return ret; -} - -COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, - struct compat_timespec __user *, utime, u32 __user *, uaddr2, - u32, val3) -{ - struct timespec ts; - ktime_t t, *tp = NULL; - int val2 = 0; - int cmd = op & FUTEX_CMD_MASK; - - if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || - cmd == FUTEX_WAIT_BITSET || - cmd == FUTEX_WAIT_REQUEUE_PI)) { - if (compat_get_timespec(&ts, utime)) - return -EFAULT; - if (!timespec_valid(&ts)) - return -EINVAL; - - t = timespec_to_ktime(ts); - if (cmd == FUTEX_WAIT) - t = ktime_add_safe(ktime_get(), t); - tp = &t; - } - if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || - cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) - val2 = (int) (unsigned long) utime; - - return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); -} -#endif /* CONFIG_COMPAT */ - static void __init futex_detect_cmpxchg(void) { #ifndef CONFIG_HAVE_FUTEX_CMPXCHG diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c new file mode 100644 index 000000000000..4ae3232e7a28 --- /dev/null +++ b/kernel/futex_compat.c @@ -0,0 +1,201 @@ +/* + * linux/kernel/futex_compat.c + * + * Futex compatibililty routines. 
+ * + * Copyright 2006, Red Hat, Inc., Ingo Molnar + */ + +#include +#include +#include +#include +#include +#include + +#include + + +/* + * Fetch a robust-list pointer. Bit 0 signals PI futexes: + */ +static inline int +fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, + compat_uptr_t __user *head, unsigned int *pi) +{ + if (get_user(*uentry, head)) + return -EFAULT; + + *entry = compat_ptr((*uentry) & ~1); + *pi = (unsigned int)(*uentry) & 1; + + return 0; +} + +static void __user *futex_uaddr(struct robust_list __user *entry, + compat_long_t futex_offset) +{ + compat_uptr_t base = ptr_to_compat(entry); + void __user *uaddr = compat_ptr(base + futex_offset); + + return uaddr; +} + +/* + * Walk curr->robust_list (very carefully, it's a userspace list!) + * and mark any locks found there dead, and notify any waiters. + * + * We silently return on any sign of list-walking problem. + */ +void compat_exit_robust_list(struct task_struct *curr) +{ + struct compat_robust_list_head __user *head = curr->compat_robust_list; + struct robust_list __user *entry, *next_entry, *pending; + unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; + unsigned int uninitialized_var(next_pi); + compat_uptr_t uentry, next_uentry, upending; + compat_long_t futex_offset; + int rc; + + if (!futex_cmpxchg_enabled) + return; + + /* + * Fetch the list head (which was registered earlier, via + * sys_set_robust_list()): + */ + if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) + return; + /* + * Fetch the relative futex offset: + */ + if (get_user(futex_offset, &head->futex_offset)) + return; + /* + * Fetch any possibly pending lock-add first, and handle it + * if it exists: + */ + if (fetch_robust_entry(&upending, &pending, + &head->list_op_pending, &pip)) + return; + + next_entry = NULL; /* avoid warning with gcc */ + while (entry != (struct robust_list __user *) &head->list) { + /* + * Fetch the next entry in the list before calling + * handle_futex_death: + 
*/ + rc = fetch_robust_entry(&next_uentry, &next_entry, + (compat_uptr_t __user *)&entry->next, &next_pi); + /* + * A pending lock might already be on the list, so + * dont process it twice: + */ + if (entry != pending) { + void __user *uaddr = futex_uaddr(entry, futex_offset); + + if (handle_futex_death(uaddr, curr, pi)) + return; + } + if (rc) + return; + uentry = next_uentry; + entry = next_entry; + pi = next_pi; + /* + * Avoid excessively long or circular lists: + */ + if (!--limit) + break; + + cond_resched(); + } + if (pending) { + void __user *uaddr = futex_uaddr(pending, futex_offset); + + handle_futex_death(uaddr, curr, pip); + } +} + +COMPAT_SYSCALL_DEFINE2(set_robust_list, + struct compat_robust_list_head __user *, head, + compat_size_t, len) +{ + if (!futex_cmpxchg_enabled) + return -ENOSYS; + + if (unlikely(len != sizeof(*head))) + return -EINVAL; + + current->compat_robust_list = head; + + return 0; +} + +COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, + compat_uptr_t __user *, head_ptr, + compat_size_t __user *, len_ptr) +{ + struct compat_robust_list_head __user *head; + unsigned long ret; + struct task_struct *p; + + if (!futex_cmpxchg_enabled) + return -ENOSYS; + + rcu_read_lock(); + + ret = -ESRCH; + if (!pid) + p = current; + else { + p = find_task_by_vpid(pid); + if (!p) + goto err_unlock; + } + + ret = -EPERM; + if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) + goto err_unlock; + + head = p->compat_robust_list; + rcu_read_unlock(); + + if (put_user(sizeof(*head), len_ptr)) + return -EFAULT; + return put_user(ptr_to_compat(head), head_ptr); + +err_unlock: + rcu_read_unlock(); + + return ret; +} + +COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, + struct compat_timespec __user *, utime, u32 __user *, uaddr2, + u32, val3) +{ + struct timespec ts; + ktime_t t, *tp = NULL; + int val2 = 0; + int cmd = op & FUTEX_CMD_MASK; + + if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || + cmd == FUTEX_WAIT_BITSET || + cmd 
== FUTEX_WAIT_REQUEUE_PI)) { + if (compat_get_timespec(&ts, utime)) + return -EFAULT; + if (!timespec_valid(&ts)) + return -EINVAL; + + t = timespec_to_ktime(ts); + if (cmd == FUTEX_WAIT) + t = ktime_add_safe(ktime_get(), t); + tp = &t; + } + if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || + cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) + val2 = (int) (unsigned long) utime; + + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); +} diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 668141c23d8f..4746500d65ec 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -887,15 +887,11 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) irqreturn_t ret; local_bh_disable(); - if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) - local_irq_disable(); ret = action->thread_fn(action->irq, action->dev_id); if (ret == IRQ_HANDLED) atomic_inc(&desc->threads_handled); irq_finalize_oneshot(desc, action); - if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) - local_irq_enable(); local_bh_enable(); return ret; } diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index 1210cd6bcaa6..6030efd4a188 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -575,10 +575,8 @@ static int kexec_calculate_store_digests(struct kimage *image) sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region); sha_regions = vzalloc(sha_region_sz); - if (!sha_regions) { - ret = -ENOMEM; + if (!sha_regions) goto out_free_desc; - } desc->tfm = tfm; desc->flags = 0; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 4801259bdc36..33c37dbc56a0 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1884,10 +1884,6 @@ int register_kretprobe(struct kretprobe *rp) int i; void *addr; - /* If only rp->kp.addr is specified, check reregistering kprobes */ - if (rp->kp.addr && check_kprobe_rereg(&rp->kp)) - return -EINVAL; - if (kretprobe_blacklist_size) { addr = kprobe_addr(&rp->kp); if (IS_ERR(addr)) @@ -1899,9 +1895,6 @@ int register_kretprobe(struct 
kretprobe *rp) } } - if (rp->data_size > KRETPROBE_MAX_DATA_SIZE) - return -E2BIG; - rp->kp.pre_handler = pre_handler_kretprobe; rp->kp.post_handler = NULL; rp->kp.fault_handler = NULL; diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c index 0613c4b1d059..62b6cee8ea7f 100644 --- a/kernel/locking/rtmutex-debug.c +++ b/kernel/locking/rtmutex-debug.c @@ -173,3 +173,12 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name) lock->name = name; } +void +rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task) +{ +} + +void rt_mutex_deadlock_account_unlock(struct task_struct *task) +{ +} + diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h index b585af9a1b50..d0519c3432b6 100644 --- a/kernel/locking/rtmutex-debug.h +++ b/kernel/locking/rtmutex-debug.h @@ -9,6 +9,9 @@ * This file contains macros used solely by rtmutex.c. Debug version. */ +extern void +rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task); +extern void rt_mutex_deadlock_account_unlock(struct task_struct *task); extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter); extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name); diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 532986d82179..dd173df9ee5e 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -163,14 +163,13 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) * 2) Drop lock->wait_lock * 3) Try to unlock the lock with cmpxchg */ -static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, - unsigned long flags) +static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock) __releases(lock->wait_lock) { struct task_struct *owner = rt_mutex_owner(lock); clear_rt_mutex_waiters(lock); - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + raw_spin_unlock(&lock->wait_lock); /* * 
If a new waiter comes in between the unlock and the cmpxchg * we have two situations: @@ -212,12 +211,11 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) /* * Simple slow path only version: lock->owner is protected by lock->wait_lock. */ -static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, - unsigned long flags) +static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock) __releases(lock->wait_lock) { lock->owner = NULL; - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + raw_spin_unlock(&lock->wait_lock); return true; } #endif @@ -499,6 +497,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, int ret = 0, depth = 0; struct rt_mutex *lock; bool detect_deadlock; + unsigned long flags; bool requeue = true; detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk); @@ -541,7 +540,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, /* * [1] Task cannot go away as we did a get_task() before ! */ - raw_spin_lock_irq(&task->pi_lock); + raw_spin_lock_irqsave(&task->pi_lock, flags); /* * [2] Get the waiter on which @task is blocked on. @@ -625,7 +624,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * operations. */ if (!raw_spin_trylock(&lock->wait_lock)) { - raw_spin_unlock_irq(&task->pi_lock); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); cpu_relax(); goto retry; } @@ -656,7 +655,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, /* * No requeue[7] here. Just release @task [8] */ - raw_spin_unlock(&task->pi_lock); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); put_task_struct(task); /* @@ -664,14 +663,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * If there is no owner of the lock, end of chain. */ if (!rt_mutex_owner(lock)) { - raw_spin_unlock_irq(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); return 0; } /* [10] Grab the next task, i.e. 
owner of @lock */ task = rt_mutex_owner(lock); get_task_struct(task); - raw_spin_lock(&task->pi_lock); + raw_spin_lock_irqsave(&task->pi_lock, flags); /* * No requeue [11] here. We just do deadlock detection. @@ -686,8 +685,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, top_waiter = rt_mutex_top_waiter(lock); /* [13] Drop locks */ - raw_spin_unlock(&task->pi_lock); - raw_spin_unlock_irq(&lock->wait_lock); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock(&lock->wait_lock); /* If owner is not blocked, end of chain. */ if (!next_lock) @@ -708,7 +707,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, rt_mutex_enqueue(lock, waiter); /* [8] Release the task */ - raw_spin_unlock(&task->pi_lock); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); put_task_struct(task); /* @@ -726,14 +725,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, */ if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) wake_up_process(rt_mutex_top_waiter(lock)->task); - raw_spin_unlock_irq(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); return 0; } /* [10] Grab the next task, i.e. 
the owner of @lock */ task = rt_mutex_owner(lock); get_task_struct(task); - raw_spin_lock(&task->pi_lock); + raw_spin_lock_irqsave(&task->pi_lock, flags); /* [11] requeue the pi waiters if necessary */ if (waiter == rt_mutex_top_waiter(lock)) { @@ -787,8 +786,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, top_waiter = rt_mutex_top_waiter(lock); /* [13] Drop the locks */ - raw_spin_unlock(&task->pi_lock); - raw_spin_unlock_irq(&lock->wait_lock); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + raw_spin_unlock(&lock->wait_lock); /* * Make the actual exit decisions [12], based on the stored @@ -811,7 +810,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, goto again; out_unlock_pi: - raw_spin_unlock_irq(&task->pi_lock); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); out_put_task: put_task_struct(task); @@ -821,7 +820,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, /* * Try to take an rt-mutex * - * Must be called with lock->wait_lock held and interrupts disabled + * Must be called with lock->wait_lock held. * * @lock: The lock to be acquired. * @task: The task which wants to acquire the lock @@ -831,6 +830,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, struct rt_mutex_waiter *waiter) { + unsigned long flags; + /* * Before testing whether we can acquire @lock, we set the * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all @@ -915,7 +916,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, * case, but conditionals are more expensive than a redundant * store. */ - raw_spin_lock(&task->pi_lock); + raw_spin_lock_irqsave(&task->pi_lock, flags); task->pi_blocked_on = NULL; /* * Finish the lock acquisition. @task is the new owner. 
If @@ -924,7 +925,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, */ if (rt_mutex_has_waiters(lock)) rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); - raw_spin_unlock(&task->pi_lock); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); takeit: /* We got the lock. */ @@ -936,6 +937,8 @@ takeit: */ rt_mutex_set_owner(lock, task); + rt_mutex_deadlock_account_lock(lock, task); + return 1; } @@ -944,7 +947,7 @@ takeit: * * Prepare waiter and propagate pi chain * - * This must be called with lock->wait_lock held and interrupts disabled + * This must be called with lock->wait_lock held. */ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, @@ -955,6 +958,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex_waiter *top_waiter = waiter; struct rt_mutex *next_lock; int chain_walk = 0, res; + unsigned long flags; /* * Early deadlock detection. We really don't want the task to @@ -968,7 +972,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, if (owner == task) return -EDEADLK; - raw_spin_lock(&task->pi_lock); + raw_spin_lock_irqsave(&task->pi_lock, flags); __rt_mutex_adjust_prio(task); waiter->task = task; waiter->lock = lock; @@ -981,12 +985,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, task->pi_blocked_on = waiter; - raw_spin_unlock(&task->pi_lock); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); if (!owner) return 0; - raw_spin_lock(&owner->pi_lock); + raw_spin_lock_irqsave(&owner->pi_lock, flags); if (waiter == rt_mutex_top_waiter(lock)) { rt_mutex_dequeue_pi(owner, top_waiter); rt_mutex_enqueue_pi(owner, waiter); @@ -1001,7 +1005,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, /* Store the lock on which owner is blocked or NULL */ next_lock = task_blocked_on_lock(owner); - raw_spin_unlock(&owner->pi_lock); + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); /* * Even if full deadlock detection is on, if the owner 
is not * blocked itself, we can avoid finding this out in the chain @@ -1017,12 +1021,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, */ get_task_struct(owner); - raw_spin_unlock_irq(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, next_lock, waiter, task); - raw_spin_lock_irq(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); return res; } @@ -1031,14 +1035,15 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, * Remove the top waiter from the current tasks pi waiter tree and * queue it up. * - * Called with lock->wait_lock held and interrupts disabled. + * Called with lock->wait_lock held. */ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, struct rt_mutex *lock) { struct rt_mutex_waiter *waiter; + unsigned long flags; - raw_spin_lock(¤t->pi_lock); + raw_spin_lock_irqsave(¤t->pi_lock, flags); waiter = rt_mutex_top_waiter(lock); @@ -1060,7 +1065,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, */ lock->owner = (void *) RT_MUTEX_HAS_WAITERS; - raw_spin_unlock(¤t->pi_lock); + raw_spin_unlock_irqrestore(¤t->pi_lock, flags); wake_q_add(wake_q, waiter->task); } @@ -1068,7 +1073,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, /* * Remove a waiter from a lock and give up * - * Must be called with lock->wait_lock held and interrupts disabled. I must + * Must be called with lock->wait_lock held and * have just failed to try_to_take_rt_mutex(). 
*/ static void remove_waiter(struct rt_mutex *lock, @@ -1077,11 +1082,12 @@ static void remove_waiter(struct rt_mutex *lock, bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); struct task_struct *owner = rt_mutex_owner(lock); struct rt_mutex *next_lock; + unsigned long flags; - raw_spin_lock(¤t->pi_lock); + raw_spin_lock_irqsave(¤t->pi_lock, flags); rt_mutex_dequeue(lock, waiter); current->pi_blocked_on = NULL; - raw_spin_unlock(¤t->pi_lock); + raw_spin_unlock_irqrestore(¤t->pi_lock, flags); /* * Only update priority if the waiter was the highest priority @@ -1090,7 +1096,7 @@ static void remove_waiter(struct rt_mutex *lock, if (!owner || !is_top_waiter) return; - raw_spin_lock(&owner->pi_lock); + raw_spin_lock_irqsave(&owner->pi_lock, flags); rt_mutex_dequeue_pi(owner, waiter); @@ -1102,7 +1108,7 @@ static void remove_waiter(struct rt_mutex *lock, /* Store the lock on which owner is blocked or NULL */ next_lock = task_blocked_on_lock(owner); - raw_spin_unlock(&owner->pi_lock); + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); /* * Don't walk the chain, if the owner task is not blocked @@ -1114,12 +1120,12 @@ static void remove_waiter(struct rt_mutex *lock, /* gets dropped in rt_mutex_adjust_prio_chain()! 
*/ get_task_struct(owner); - raw_spin_unlock_irq(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, next_lock, NULL, current); - raw_spin_lock_irq(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); } /* @@ -1151,23 +1157,15 @@ void rt_mutex_adjust_pi(struct task_struct *task) next_lock, NULL, task); } -void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) -{ - debug_rt_mutex_init_waiter(waiter); - RB_CLEAR_NODE(&waiter->pi_tree_entry); - RB_CLEAR_NODE(&waiter->tree_entry); - waiter->task = NULL; -} - /** * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop * @lock: the rt_mutex to take * @state: the state the task should block in (TASK_INTERRUPTIBLE - * or TASK_UNINTERRUPTIBLE) + * or TASK_UNINTERRUPTIBLE) * @timeout: the pre-initialized and started timer, or NULL for none * @waiter: the pre-initialized rt_mutex_waiter * - * Must be called with lock->wait_lock held and interrupts disabled + * lock->wait_lock must be held by the caller. */ static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, int state, @@ -1195,13 +1193,13 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, break; } - raw_spin_unlock_irq(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); debug_rt_mutex_print_deadlock(waiter); schedule(); - raw_spin_lock_irq(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); set_current_state(state); } @@ -1238,24 +1236,17 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, enum rtmutex_chainwalk chwalk) { struct rt_mutex_waiter waiter; - unsigned long flags; int ret = 0; - rt_mutex_init_waiter(&waiter); + debug_rt_mutex_init_waiter(&waiter); + RB_CLEAR_NODE(&waiter.pi_tree_entry); + RB_CLEAR_NODE(&waiter.tree_entry); - /* - * Technically we could use raw_spin_[un]lock_irq() here, but this can - * be called in early boot if the cmpxchg() fast path is disabled - * (debug, no architecture support). 
In this case we will acquire the - * rtmutex with lock->wait_lock held. But we cannot unconditionally - * enable interrupts in that early boot case. So we need to use the - * irqsave/restore variants. - */ - raw_spin_lock_irqsave(&lock->wait_lock, flags); + raw_spin_lock(&lock->wait_lock); /* Try to acquire the lock again: */ if (try_to_take_rt_mutex(lock, current, NULL)) { - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + raw_spin_unlock(&lock->wait_lock); return 0; } @@ -1284,7 +1275,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, */ fixup_rt_mutex_waiters(lock); - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + raw_spin_unlock(&lock->wait_lock); /* Remove pending timer: */ if (unlikely(timeout)) @@ -1295,25 +1286,11 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, return ret; } -static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock) -{ - int ret = try_to_take_rt_mutex(lock, current, NULL); - - /* - * try_to_take_rt_mutex() sets the lock waiters bit - * unconditionally. Clean this up. - */ - fixup_rt_mutex_waiters(lock); - - return ret; -} - /* * Slow path try-lock function: */ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) { - unsigned long flags; int ret; /* @@ -1325,14 +1302,20 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) return 0; /* - * The mutex has currently no owner. Lock the wait lock and try to - * acquire the lock. We use irqsave here to support early boot calls. + * The mutex has currently no owner. Lock the wait lock and + * try to acquire the lock. */ - raw_spin_lock_irqsave(&lock->wait_lock, flags); + raw_spin_lock(&lock->wait_lock); - ret = __rt_mutex_slowtrylock(lock); + ret = try_to_take_rt_mutex(lock, current, NULL); + + /* + * try_to_take_rt_mutex() sets the lock waiters bit + * unconditionally. Clean this up. 
+ */ + fixup_rt_mutex_waiters(lock); - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + raw_spin_unlock(&lock->wait_lock); return ret; } @@ -1344,13 +1327,12 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, struct wake_q_head *wake_q) { - unsigned long flags; - - /* irqsave required to support early boot calls */ - raw_spin_lock_irqsave(&lock->wait_lock, flags); + raw_spin_lock(&lock->wait_lock); debug_rt_mutex_unlock(lock); + rt_mutex_deadlock_account_unlock(current); + /* * We must be careful here if the fast path is enabled. If we * have no waiters queued we cannot set owner to NULL here @@ -1384,10 +1366,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, */ while (!rt_mutex_has_waiters(lock)) { /* Drops lock->wait_lock ! */ - if (unlock_rt_mutex_safe(lock, flags) == true) + if (unlock_rt_mutex_safe(lock) == true) return false; /* Relock the rtmutex and try again */ - raw_spin_lock_irqsave(&lock->wait_lock, flags); + raw_spin_lock(&lock->wait_lock); } /* @@ -1398,7 +1380,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, */ mark_wakeup_next_waiter(wake_q, lock); - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + raw_spin_unlock(&lock->wait_lock); /* check PI boosting */ return true; @@ -1416,10 +1398,11 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, enum rtmutex_chainwalk chwalk)) { - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { + rt_mutex_deadlock_account_lock(lock, current); return 0; - - return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); + } else + return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); } static inline int @@ -1431,19 +1414,21 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, enum rtmutex_chainwalk chwalk)) { if (chwalk == RT_MUTEX_MIN_CHAINWALK && - 
likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { + rt_mutex_deadlock_account_lock(lock, current); return 0; - - return slowfn(lock, state, timeout, chwalk); + } else + return slowfn(lock, state, timeout, chwalk); } static inline int rt_mutex_fasttrylock(struct rt_mutex *lock, int (*slowfn)(struct rt_mutex *lock)) { - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { + rt_mutex_deadlock_account_lock(lock, current); return 1; - + } return slowfn(lock); } @@ -1453,18 +1438,19 @@ rt_mutex_fastunlock(struct rt_mutex *lock, struct wake_q_head *wqh)) { WAKE_Q(wake_q); - bool deboost; - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) - return; + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { + rt_mutex_deadlock_account_unlock(current); - deboost = slowfn(lock, &wake_q); + } else { + bool deboost = slowfn(lock, &wake_q); - wake_up_q(&wake_q); + wake_up_q(&wake_q); - /* Undo pi boosting if necessary: */ - if (deboost) - rt_mutex_adjust_prio(current); + /* Undo pi boosting if necessary: */ + if (deboost) + rt_mutex_adjust_prio(current); + } } /** @@ -1498,16 +1484,16 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); /* - * Futex variant, must not use fastpath. + * Futex variant with full deadlock detection. 
*/ -int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) +int rt_mutex_timed_futex_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *timeout) { - return rt_mutex_slowtrylock(lock); -} + might_sleep(); -int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) -{ - return __rt_mutex_slowtrylock(lock); + return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, + RT_MUTEX_FULL_CHAINWALK, + rt_mutex_slowlock); } /** @@ -1566,38 +1552,20 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock) EXPORT_SYMBOL_GPL(rt_mutex_unlock); /** - * Futex variant, that since futex variants do not use the fast-path, can be - * simple and will not need to retry. + * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock + * @lock: the rt_mutex to be unlocked + * + * Returns: true/false indicating whether priority adjustment is + * required or not. */ -bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, - struct wake_q_head *wake_q) +bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock, + struct wake_q_head *wqh) { - lockdep_assert_held(&lock->wait_lock); - - debug_rt_mutex_unlock(lock); - - if (!rt_mutex_has_waiters(lock)) { - lock->owner = NULL; - return false; /* done */ - } - - mark_wakeup_next_waiter(wake_q, lock); - return true; /* deboost and wakeups */ -} - -void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) -{ - WAKE_Q(wake_q); - bool deboost; - - raw_spin_lock_irq(&lock->wait_lock); - deboost = __rt_mutex_futex_unlock(lock, &wake_q); - raw_spin_unlock_irq(&lock->wait_lock); - - if (deboost) { - wake_up_q(&wake_q); - rt_mutex_adjust_prio(current); + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { + rt_mutex_deadlock_account_unlock(current); + return false; } + return rt_mutex_slowunlock(lock, wqh); } /** @@ -1654,6 +1622,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, __rt_mutex_init(lock, NULL); debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_set_owner(lock, proxy_owner); + 
rt_mutex_deadlock_account_lock(lock, proxy_owner); } /** @@ -1664,10 +1633,12 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, * No locking. Caller has to do serializing itself * Special API call for PI-futex support */ -void rt_mutex_proxy_unlock(struct rt_mutex *lock) +void rt_mutex_proxy_unlock(struct rt_mutex *lock, + struct task_struct *proxy_owner) { debug_rt_mutex_proxy_unlock(lock); rt_mutex_set_owner(lock, NULL); + rt_mutex_deadlock_account_unlock(proxy_owner); } /** @@ -1689,10 +1660,10 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, { int ret; - raw_spin_lock_irq(&lock->wait_lock); + raw_spin_lock(&lock->wait_lock); if (try_to_take_rt_mutex(lock, task, NULL)) { - raw_spin_unlock_irq(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); return 1; } @@ -1713,7 +1684,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, if (unlikely(ret)) remove_waiter(lock, waiter); - raw_spin_unlock_irq(&lock->wait_lock); + raw_spin_unlock(&lock->wait_lock); debug_rt_mutex_print_deadlock(waiter); @@ -1763,16 +1734,20 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, { int ret; - raw_spin_lock_irq(&lock->wait_lock); - /* sleep on the mutex */ + raw_spin_lock(&lock->wait_lock); + set_current_state(TASK_INTERRUPTIBLE); + + /* sleep on the mutex */ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); + /* * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might * have to fix that up. */ fixup_rt_mutex_waiters(lock); - raw_spin_unlock_irq(&lock->wait_lock); + + raw_spin_unlock(&lock->wait_lock); return ret; } @@ -1802,32 +1777,15 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, bool cleanup = false; raw_spin_lock_irq(&lock->wait_lock); - /* - * Do an unconditional try-lock, this deals with the lock stealing - * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter() - * sets a NULL owner. 
- * - * We're not interested in the return value, because the subsequent - * test on rt_mutex_owner() will infer that. If the trylock succeeded, - * we will own the lock and it will have removed the waiter. If we - * failed the trylock, we're still not owner and we need to remove - * ourselves. - */ - try_to_take_rt_mutex(lock, current, waiter); /* * Unless we're the owner; we're still enqueued on the wait_list. * So check if we became owner, if not, take us off the wait_list. */ if (rt_mutex_owner(lock) != current) { remove_waiter(lock, waiter); + fixup_rt_mutex_waiters(lock); cleanup = true; } - /* - * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might - * have to fix that up. - */ - fixup_rt_mutex_waiters(lock); - raw_spin_unlock_irq(&lock->wait_lock); return cleanup; diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h index 6607802efa8b..c4060584c407 100644 --- a/kernel/locking/rtmutex.h +++ b/kernel/locking/rtmutex.h @@ -11,6 +11,8 @@ */ #define rt_mutex_deadlock_check(l) (0) +#define rt_mutex_deadlock_account_lock(m, t) do { } while (0) +#define rt_mutex_deadlock_account_unlock(l) do { } while (0) #define debug_rt_mutex_init_waiter(w) do { } while (0) #define debug_rt_mutex_free_waiter(w) do { } while (0) #define debug_rt_mutex_lock(l) do { } while (0) diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index 97c048c494f0..6f8f68edb700 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -101,8 +101,8 @@ enum rtmutex_chainwalk { extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner); -extern void rt_mutex_proxy_unlock(struct rt_mutex *lock); -extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); +extern void rt_mutex_proxy_unlock(struct rt_mutex *lock, + struct task_struct *proxy_owner); extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock, 
struct rt_mutex_waiter *waiter, struct task_struct *task); @@ -111,13 +111,9 @@ extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, struct rt_mutex_waiter *waiter); extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, struct rt_mutex_waiter *waiter); -extern int rt_mutex_futex_trylock(struct rt_mutex *l); -extern int __rt_mutex_futex_trylock(struct rt_mutex *l); - -extern void rt_mutex_futex_unlock(struct rt_mutex *lock); -extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, - struct wake_q_head *wqh); - +extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to); +extern bool rt_mutex_futex_unlock(struct rt_mutex *lock, + struct wake_q_head *wqh); extern void rt_mutex_adjust_prio(struct task_struct *task); #ifdef CONFIG_DEBUG_RT_MUTEXES diff --git a/kernel/module.c b/kernel/module.c index 7ce940ab20dc..3b7aac2ea1e5 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1779,6 +1779,7 @@ static int mod_sysfs_init(struct module *mod) if (err) mod_kobject_put(mod); + /* delay uevent until full sysfs population */ out: return err; } @@ -1812,6 +1813,7 @@ static int mod_sysfs_setup(struct module *mod, add_sect_attrs(mod, info); add_notes_attrs(mod, info); + kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); return 0; out_unreg_param: @@ -2107,21 +2109,6 @@ static int verify_export_symbols(struct module *mod) return 0; } -static bool ignore_undef_symbol(Elf_Half emachine, const char *name) -{ - /* - * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as - * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64. - * i386 has a similar problem but may not deserve a fix. - * - * If we ever have to ignore many symbols, consider refactoring the code to - * only warn if referenced by a relocation. - */ - if (emachine == EM_386 || emachine == EM_X86_64) - return !strcmp(name, "_GLOBAL_OFFSET_TABLE_"); - return false; -} - /* Change all symbols so that st_value encodes the pointer directly. 
*/ static int simplify_symbols(struct module *mod, const struct load_info *info) { @@ -2163,10 +2150,8 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) break; } - /* Ok if weak or ignored. */ - if (!ksym && - (ELF_ST_BIND(sym[i].st_info) == STB_WEAK || - ignore_undef_symbol(info->hdr->e_machine, name))) + /* Ok if weak. */ + if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK) break; pr_warn("%s: Unknown symbol %s (err %li)\n", @@ -3322,9 +3307,6 @@ static noinline int do_init_module(struct module *mod) blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_LIVE, mod); - /* Delay uevent until module has finished its init routine */ - kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); - /* * We need to finish all async code before the module init sequence * is done. This has potential to deadlock. For example, a newly @@ -3613,7 +3595,6 @@ static int load_module(struct load_info *info, const char __user *uargs, return do_init_module(mod); bug_cleanup: - mod->state = MODULE_STATE_GOING; /* module_bug_cleanup needs module_mutex protection */ mutex_lock(&module_mutex); module_bug_cleanup(mod); diff --git a/kernel/pid.c b/kernel/pid.c index ccfdb56321c6..5fe7cdb6d05f 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -68,7 +68,9 @@ static inline int mk_pid(struct pid_namespace *pid_ns, * the scheme scales to up to 4 million PIDs, runtime. */ struct pid_namespace init_pid_ns = { - .kref = KREF_INIT(2), + .kref = { + .refcount = ATOMIC_INIT(2), + }, .pidmap = { [ 0 ... 
PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } }, diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 77fef12bb45b..0336ab14b408 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -1528,10 +1528,9 @@ end: int swsusp_check(void) { int error; - void *holder; hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, - FMODE_READ | FMODE_EXCL, &holder); + FMODE_READ, NULL); if (!IS_ERR(hib_resume_bdev)) { set_blocksize(hib_resume_bdev, PAGE_SIZE); clear_page(swsusp_header); @@ -1557,7 +1556,7 @@ int swsusp_check(void) put: if (error) - blkdev_put(hib_resume_bdev, FMODE_READ | FMODE_EXCL); + blkdev_put(hib_resume_bdev, FMODE_READ); else pr_debug("PM: Image signature found, resuming\n"); } else { diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index 78e354b1c593..1896386e16bb 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -38,19 +38,23 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active) { struct rb_node *node; struct wakelock *wl; - int len = 0; + char *str = buf; + char *end = buf + PAGE_SIZE; mutex_lock(&wakelocks_lock); for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) { wl = rb_entry(node, struct wakelock, node); if (wl->ws.active == show_active) - len += sysfs_emit_at(buf, len, "%s ", wl->name); + str += scnprintf(str, end - str, "%s ", wl->name); } - len += sysfs_emit_at(buf, len, "\n"); + if (str > buf) + str--; + + str += scnprintf(str, end - str, "\n"); mutex_unlock(&wakelocks_lock); - return len; + return (str - buf); } #if CONFIG_PM_WAKELOCKS_LIMIT > 0 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 460806df3b26..96ed76de3b46 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -2045,15 +2045,8 @@ static int __init console_setup(char *str) char *s, *options, *brl_options = NULL; int idx; - /* - * console="" or console=null have been suggested as a way to - * disable console output. 
Use ttynull that has been created - * for exacly this purpose. - */ - if (str[0] == 0 || strcmp(str, "null") == 0) { - __add_preferred_console("ttynull", 0, NULL, NULL); + if (str[0] == 0) return 1; - } if (_braille_console_setup(&str, &brl_options)) return 1; diff --git a/kernel/profile.c b/kernel/profile.c index 927a0345e259..9cd8e18e6f18 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -38,8 +38,7 @@ struct profile_hit { #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ) static atomic_t *prof_buffer; -static unsigned long prof_len; -static unsigned short int prof_shift; +static unsigned long prof_len, prof_shift; int prof_on __read_mostly; EXPORT_SYMBOL_GPL(prof_on); @@ -64,8 +63,8 @@ int profile_setup(char *str) if (str[strlen(sleepstr)] == ',') str += strlen(sleepstr) + 1; if (get_option(&str, &par)) - prof_shift = clamp(par, 0, BITS_PER_LONG - 1); - pr_info("kernel sleep profiling enabled (shift: %u)\n", + prof_shift = par; + pr_info("kernel sleep profiling enabled (shift: %ld)\n", prof_shift); #else pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n"); @@ -75,21 +74,21 @@ int profile_setup(char *str) if (str[strlen(schedstr)] == ',') str += strlen(schedstr) + 1; if (get_option(&str, &par)) - prof_shift = clamp(par, 0, BITS_PER_LONG - 1); - pr_info("kernel schedule profiling enabled (shift: %u)\n", + prof_shift = par; + pr_info("kernel schedule profiling enabled (shift: %ld)\n", prof_shift); } else if (!strncmp(str, kvmstr, strlen(kvmstr))) { prof_on = KVM_PROFILING; if (str[strlen(kvmstr)] == ',') str += strlen(kvmstr) + 1; if (get_option(&str, &par)) - prof_shift = clamp(par, 0, BITS_PER_LONG - 1); - pr_info("kernel KVM profiling enabled (shift: %u)\n", + prof_shift = par; + pr_info("kernel KVM profiling enabled (shift: %ld)\n", prof_shift); } else if (get_option(&str, &par)) { - prof_shift = clamp(par, 0, BITS_PER_LONG - 1); + prof_shift = par; prof_on = CPU_PROFILING; - pr_info("kernel profiling enabled (shift: %u)\n", + 
pr_info("kernel profiling enabled (shift: %ld)\n", prof_shift); } return 1; @@ -476,7 +475,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos) unsigned long p = *ppos; ssize_t read; char *pnt; - unsigned long sample_step = 1UL << prof_shift; + unsigned int sample_step = 1 << prof_shift; profile_flip_buffers(); if (p >= (prof_len+1)*sizeof(unsigned int)) diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 5a1d8cc7ef4e..da8c358930fb 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -129,21 +129,6 @@ void __ptrace_unlink(struct task_struct *child) spin_unlock(&child->sighand->siglock); } -static bool looks_like_a_spurious_pid(struct task_struct *task) -{ - if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP)) - return false; - - if (task_pid_vnr(task) == task->ptrace_message) - return false; - /* - * The tracee changed its pid but the PTRACE_EVENT_EXEC event - * was not wait()'ed, most probably debugger targets the old - * leader which was destroyed in de_thread(). 
- */ - return true; -} - /* Ensure that nothing can wake it up, even SIGKILL */ static bool ptrace_freeze_traced(struct task_struct *task) { @@ -154,8 +139,7 @@ static bool ptrace_freeze_traced(struct task_struct *task) return ret; spin_lock_irq(&task->sighand->siglock); - if (task_is_traced(task) && !looks_like_a_spurious_pid(task) && - !__fatal_signal_pending(task)) { + if (task_is_traced(task) && !__fatal_signal_pending(task)) { task->state = __TASK_TRACED; ret = true; } diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 99378130a42f..7dde1b9918e4 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -17,13 +17,14 @@ endif obj-y += core.o loadavg.o clock.o cputime.o obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o -obj-y += wait.o completion.o idle.o +obj-y += wait.o completion.o idle.o sched_avg.o obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o -obj-$(CONFIG_SCHED_WALT) += walt.o +obj-$(CONFIG_SCHED_HMP) += hmp.o boost.o obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o obj-$(CONFIG_SCHEDSTATS) += stats.o obj-$(CONFIG_SCHED_DEBUG) += debug.o obj-$(CONFIG_SCHED_TUNE) += tune.o obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o +obj-$(CONFIG_SCHED_CORE_CTL) += core_ctl.o obj-$(CONFIG_CPU_FREQ) += cpufreq.o obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o diff --git a/kernel/sched/boost.c b/kernel/sched/boost.c new file mode 100644 index 000000000000..c23b1aebf344 --- /dev/null +++ b/kernel/sched/boost.c @@ -0,0 +1,235 @@ +/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2017 Sony Mobile Communications Inc, + * and licensed under the license of the file. + */ + +#include "sched.h" +#include +#include +#include + +/* + * Scheduler boost is a mechanism to temporarily place tasks on CPUs + * with higher capacity than those where a task would have normally + * ended up with their load characteristics. Any entity enabling + * boost is responsible for disabling it as well. + */ + +unsigned int sysctl_sched_boost; +static enum sched_boost_policy boost_policy; +static enum sched_boost_policy boost_policy_dt = SCHED_BOOST_NONE; +static DEFINE_MUTEX(boost_mutex); +static unsigned int freq_aggr_threshold_backup; + +static inline void boost_kick(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + + if (!test_and_set_bit(BOOST_KICK, &rq->hmp_flags)) + smp_send_reschedule(cpu); +} + +static void boost_kick_cpus(void) +{ + int i; + struct cpumask kick_mask; + u32 nr_running; + + if (boost_policy != SCHED_BOOST_ON_BIG) + return; + + cpumask_andnot(&kick_mask, cpu_online_mask, cpu_isolated_mask); + + for_each_cpu(i, &kick_mask) { + /* + * kick only "small" cluster + */ + if (cpu_capacity(i) != max_capacity) { + nr_running = ACCESS_ONCE(cpu_rq(i)->nr_running); + + /* + * make sense to interrupt CPU if its run-queue + * has something running in order to check for + * migration afterwards, otherwise skip it. 
+ */ + if (nr_running) + boost_kick(i); + } + } +} + +int got_boost_kick(void) +{ + int cpu = smp_processor_id(); + struct rq *rq = cpu_rq(cpu); + + return test_bit(BOOST_KICK, &rq->hmp_flags); +} + +void clear_boost_kick(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + + clear_bit(BOOST_KICK, &rq->hmp_flags); +} + +/* + * Scheduler boost type and boost policy might at first seem unrelated, + * however, there exists a connection between them that will allow us + * to use them interchangeably during placement decisions. We'll explain + * the connection here in one possible way so that the implications are + * clear when looking at placement policies. + * + * When policy = SCHED_BOOST_NONE, type is either none or RESTRAINED + * When policy = SCHED_BOOST_ON_ALL or SCHED_BOOST_ON_BIG, type can + * neither be none nor RESTRAINED. + */ +static void set_boost_policy(int type) +{ + if (type == SCHED_BOOST_NONE || type == RESTRAINED_BOOST) { + boost_policy = SCHED_BOOST_NONE; + return; + } + + if (boost_policy_dt) { + boost_policy = boost_policy_dt; + return; + } + + if (min_possible_efficiency != max_possible_efficiency) { + boost_policy = SCHED_BOOST_ON_BIG; + return; + } + + boost_policy = SCHED_BOOST_ON_ALL; +} + +enum sched_boost_policy sched_boost_policy(void) +{ + return boost_policy; +} + +static bool verify_boost_params(int old_val, int new_val) +{ + /* + * Boost can only be turned on or off. There is no possiblity of + * switching from one boost type to another or to set the same + * kind of boost several times. 
+ */ + return !(!!old_val == !!new_val); +} + +static void _sched_set_boost(int old_val, int type) +{ + switch (type) { + case NO_BOOST: + if (old_val == FULL_THROTTLE_BOOST) + core_ctl_set_boost(false); + else if (old_val == CONSERVATIVE_BOOST) + restore_cgroup_boost_settings(); + else + update_freq_aggregate_threshold( + freq_aggr_threshold_backup); + break; + + case FULL_THROTTLE_BOOST: + core_ctl_set_boost(true); + boost_kick_cpus(); + break; + + case CONSERVATIVE_BOOST: + update_cgroup_boost_settings(); + boost_kick_cpus(); + break; + + case RESTRAINED_BOOST: + freq_aggr_threshold_backup = + update_freq_aggregate_threshold(1); + break; + + default: + WARN_ON(1); + return; + } + + set_boost_policy(type); + sysctl_sched_boost = type; + trace_sched_set_boost(type); +} + +void sched_boost_parse_dt(void) +{ + struct device_node *sn; + const char *boost_policy; + + sn = of_find_node_by_path("/sched-hmp"); + if (!sn) + return; + + if (!of_property_read_string(sn, "boost-policy", &boost_policy)) { + if (!strcmp(boost_policy, "boost-on-big")) + boost_policy_dt = SCHED_BOOST_ON_BIG; + else if (!strcmp(boost_policy, "boost-on-all")) + boost_policy_dt = SCHED_BOOST_ON_ALL; + } +} + +int sched_set_boost(int type) +{ + int ret = 0; + + mutex_lock(&boost_mutex); + + if (verify_boost_params(sysctl_sched_boost, type)) + _sched_set_boost(sysctl_sched_boost, type); + else + ret = -EINVAL; + + mutex_unlock(&boost_mutex); + return ret; +} + +int sched_boost_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + unsigned int *data = (unsigned int *)table->data; + unsigned int old_val; + + mutex_lock(&boost_mutex); + + old_val = *data; + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + + if (ret || !write) + goto done; + + if (verify_boost_params(old_val, *data)) { + _sched_set_boost(old_val, *data); + } else { + *data = old_val; + ret = -EINVAL; + } + +done: + mutex_unlock(&boost_mutex); + return ret; +} + 
+int sched_boost(void) +{ + return sysctl_sched_boost; +} diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 74dc6b61e285..015e2b46bd7a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -25,6 +25,11 @@ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, * Thomas Gleixner, Mike Kravetz */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2017 Sony Mobile Communications Inc, + * and licensed under the license of the file. + */ #include #include @@ -75,6 +80,8 @@ #include #include #include +#include +#include #include #include @@ -91,10 +98,10 @@ #include "sched.h" #include "../workqueue_internal.h" #include "../smpboot.h" +#include "../time/tick-internal.h" #define CREATE_TRACE_POINTS #include -#include "walt.h" static atomic_t __su_instances; @@ -128,6 +135,8 @@ void su_exit(void) atomic_dec(&__su_instances); } +ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head); + DEFINE_MUTEX(sched_domains_mutex); DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); @@ -892,6 +901,7 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) if (!(flags & ENQUEUE_RESTORE)) sched_info_queued(rq, p); p->sched_class->enqueue_task(rq, p, flags); + trace_sched_enq_deq_task(p, 1, cpumask_bits(&p->cpus_allowed)[0]); } static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) @@ -900,6 +910,7 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) if (!(flags & DEQUEUE_SAVE)) sched_info_dequeued(rq, p); p->sched_class->dequeue_task(rq, p, flags); + trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_allowed)[0]); } void activate_task(struct rq *rq, struct task_struct *p, int flags) @@ -915,6 +926,9 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags) if (task_contributes_to_load(p)) rq->nr_uninterruptible++; + if (flags & DEQUEUE_SLEEP) + clear_ed_task(p, rq); + dequeue_task(rq, p, flags); } 
@@ -1164,6 +1178,8 @@ struct migration_arg { */ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu) { + int src_cpu; + if (unlikely(!cpu_active(dest_cpu))) return rq; @@ -1171,6 +1187,7 @@ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_ if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) return rq; + src_cpu = cpu_of(rq); rq = move_queued_task(rq, p, dest_cpu); return rq; @@ -1186,6 +1203,8 @@ static int migration_cpu_stop(void *data) struct migration_arg *arg = data; struct task_struct *p = arg->task; struct rq *rq = this_rq(); + int src_cpu = cpu_of(rq); + bool moved = false; /* * The original target cpu might have gone down and we might @@ -1206,12 +1225,18 @@ static int migration_cpu_stop(void *data) * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because * we're holding p->pi_lock. */ - if (task_rq(p) == rq && task_on_rq_queued(p)) + if (task_rq(p) == rq && task_on_rq_queued(p)) { rq = __migrate_task(rq, p, arg->dest_cpu); + moved = true; + } raw_spin_unlock(&rq->lock); raw_spin_unlock(&p->pi_lock); local_irq_enable(); + + if (moved) + notify_migration(src_cpu, arg->dest_cpu, false, p); + return 0; } @@ -1270,6 +1295,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, struct rq *rq; unsigned int dest_cpu; int ret = 0; + cpumask_t allowed_mask; rq = task_rq_lock(p, &flags); @@ -1285,18 +1311,25 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, if (cpumask_equal(&p->cpus_allowed, new_mask)) goto out; - if (!cpumask_intersects(new_mask, cpu_active_mask)) { - ret = -EINVAL; - goto out; + cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask); + cpumask_and(&allowed_mask, &allowed_mask, cpu_active_mask); + + dest_cpu = cpumask_any(&allowed_mask); + if (dest_cpu >= nr_cpu_ids) { + cpumask_and(&allowed_mask, cpu_active_mask, new_mask); + dest_cpu = cpumask_any(&allowed_mask); + if (dest_cpu >= nr_cpu_ids) { + ret = -EINVAL; + goto out; + } } do_set_cpus_allowed(p, 
new_mask); /* Can the task run on the task's current CPU? If so, we're done */ - if (cpumask_test_cpu(task_cpu(p), new_mask)) + if (cpumask_test_cpu(task_cpu(p), &allowed_mask)) goto out; - dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); if (task_running(rq, p) || p->state == TASK_WAKING) { struct migration_arg arg = { p, dest_cpu }; /* Need help from migration thread: drop lock and wait. */ @@ -1360,7 +1393,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) #endif #endif - trace_sched_migrate_task(p, new_cpu); + trace_sched_migrate_task(p, new_cpu, pct_task_load(p)); if (task_cpu(p) != new_cpu) { if (p->sched_class->migrate_task_rq) @@ -1368,7 +1401,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) p->se.nr_migrations++; perf_event_task_migrate(p); - walt_fixup_busy_time(p, new_cpu); + fixup_busy_time(p, new_cpu); } __set_task_cpu(p, new_cpu); @@ -1620,12 +1653,13 @@ EXPORT_SYMBOL_GPL(kick_process); /* * ->cpus_allowed is protected by both rq->lock and p->pi_lock */ -static int select_fallback_rq(int cpu, struct task_struct *p) +static int select_fallback_rq(int cpu, struct task_struct *p, bool allow_iso) { int nid = cpu_to_node(cpu); const struct cpumask *nodemask = NULL; - enum { cpuset, possible, fail } state = cpuset; + enum { cpuset, possible, fail, bug } state = cpuset; int dest_cpu; + int isolated_candidate = -1; /* * If the node that the cpu is on has been offlined, cpu_to_node() @@ -1641,6 +1675,8 @@ static int select_fallback_rq(int cpu, struct task_struct *p) continue; if (!cpu_active(dest_cpu)) continue; + if (cpu_isolated(dest_cpu)) + continue; if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) return dest_cpu; } @@ -1653,6 +1689,16 @@ static int select_fallback_rq(int cpu, struct task_struct *p) continue; if (!cpu_active(dest_cpu)) continue; + if (cpu_isolated(dest_cpu)) { + if (allow_iso) + isolated_candidate = dest_cpu; + continue; + } + goto out; + } + + if (isolated_candidate != -1) { + dest_cpu = 
isolated_candidate; goto out; } @@ -1671,6 +1717,11 @@ static int select_fallback_rq(int cpu, struct task_struct *p) break; case fail: + allow_iso = true; + state = bug; + break; + + case bug: BUG(); break; } @@ -1699,6 +1750,8 @@ static inline int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags, int sibling_count_hint) { + bool allow_isolated = (p->flags & PF_KTHREAD); + lockdep_assert_held(&p->pi_lock); if (p->nr_cpus_allowed > 1) @@ -1716,13 +1769,14 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags, * not worry about this generic constraint ] */ if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) || - !cpu_online(cpu))) - cpu = select_fallback_rq(task_cpu(p), p); + !cpu_online(cpu)) || + (cpu_isolated(cpu) && !allow_isolated)) + cpu = select_fallback_rq(task_cpu(p), p, allow_isolated); return cpu; } -static void update_avg(u64 *avg, u64 sample) +void update_avg(u64 *avg, u64 sample) { s64 diff = sample - *avg; *avg += diff >> 3; @@ -1795,6 +1849,7 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) { check_preempt_curr(rq, p, wake_flags); + p->state = TASK_RUNNING; trace_sched_wakeup(p); @@ -1886,6 +1941,8 @@ void sched_ttwu_pending(void) void scheduler_ipi(void) { + int cpu = smp_processor_id(); + /* * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting * TIF_NEED_RESCHED remotely (for the first time) will also send @@ -1893,9 +1950,18 @@ void scheduler_ipi(void) */ preempt_fold_need_resched(); - if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()) + if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick() && + !got_boost_kick()) return; + if (got_boost_kick()) { + struct rq *rq = cpu_rq(cpu); + + if (rq->curr->sched_class == &fair_sched_class) + check_for_migration(rq, rq->curr); + clear_boost_kick(cpu); + } + /* * Not all reschedule IPI handlers call irq_enter/irq_exit, since * traditionally all their work was done from the interrupt 
return @@ -1915,7 +1981,7 @@ void scheduler_ipi(void) /* * Check if someone kicked us for doing the nohz idle load balance. */ - if (unlikely(got_nohz_idle_kick())) { + if (unlikely(got_nohz_idle_kick()) && !cpu_isolated(cpu)) { this_rq()->idle_balance = 1; raise_softirq_irqoff(SCHED_SOFTIRQ); } @@ -1960,9 +2026,6 @@ out: bool cpus_share_cache(int this_cpu, int that_cpu) { - if (this_cpu == that_cpu) - return true; - return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); } #endif /* CONFIG_SMP */ @@ -2010,9 +2073,15 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags, unsigned long flags; int cpu, src_cpu, success = 0; #ifdef CONFIG_SMP + unsigned int old_load; struct rq *rq; u64 wallclock; + struct related_thread_group *grp = NULL; #endif + bool freq_notif_allowed = !(wake_flags & WF_NO_NOTIFIER); + bool check_group = false; + + wake_flags &= ~WF_NO_NOTIFIER; /* * If we are going to wake up a thread waiting for CONDITION we @@ -2096,11 +2165,20 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags, rq = cpu_rq(task_cpu(p)); raw_spin_lock(&rq->lock); - wallclock = walt_ktime_clock(); - walt_update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); - walt_update_task_ravg(p, rq, TASK_WAKE, wallclock, 0); + old_load = task_load(p); + wallclock = sched_ktime_clock(); + update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); + update_task_ravg(p, rq, TASK_WAKE, wallclock, 0); + cpufreq_update_util(rq, 0); raw_spin_unlock(&rq->lock); + rcu_read_lock(); + grp = task_related_thread_group(p); + if (update_preferred_cluster(grp, p, old_load)) + set_preferred_cluster(grp); + rcu_read_unlock(); + check_group = grp != NULL; + p->sched_contributes_to_load = !!task_contributes_to_load(p); p->state = TASK_WAKING; @@ -2117,13 +2195,26 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags, set_task_cpu(p, cpu); } + note_task_waking(p, wallclock); #endif /* CONFIG_SMP */ - ttwu_queue(p, cpu); stat: 
ttwu_stat(p, cpu, wake_flags); out: - raw_spin_unlock_irqrestore(&p->pi_lock, flags); + raw_spin_unlock(&p->pi_lock); + + if (freq_notif_allowed) { + if (!same_freq_domain(src_cpu, cpu)) { + check_for_freq_change(cpu_rq(cpu), + false, check_group); + check_for_freq_change(cpu_rq(src_cpu), + false, check_group); + } else if (success) { + check_for_freq_change(cpu_rq(cpu), true, false); + } + } + + local_irq_restore(flags); return success; } @@ -2170,17 +2261,20 @@ static void try_to_wake_up_local(struct task_struct *p) trace_sched_waking(p); if (!task_on_rq_queued(p)) { - u64 wallclock = walt_ktime_clock(); + u64 wallclock = sched_ktime_clock(); - walt_update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); - walt_update_task_ravg(p, rq, TASK_WAKE, wallclock, 0); + update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); + update_task_ravg(p, rq, TASK_WAKE, wallclock, 0); + cpufreq_update_util(rq, 0); ttwu_activate(rq, p, ENQUEUE_WAKEUP); + note_task_waking(p, wallclock); } ttwu_do_wakeup(rq, p, 0); ttwu_stat(p, smp_processor_id(), 0); out: raw_spin_unlock(&p->pi_lock); + /* Todo : Send cpufreq notifier */ } /** @@ -2201,6 +2295,26 @@ int wake_up_process(struct task_struct *p) } EXPORT_SYMBOL(wake_up_process); +/** + * wake_up_process_no_notif - Wake up a specific process without notifying + * governor + * @p: The process to be woken up. + * + * Attempt to wake up the nominated process and move it to the set of runnable + * processes. + * + * Return: 1 if the process was woken up, 0 if it was already running. + * + * It may be assumed that this function implies a write memory barrier before + * changing the task state if and only if any tasks are woken up. 
+ */ +int wake_up_process_no_notif(struct task_struct *p) +{ + WARN_ON(task_is_stopped_or_traced(p)); + return try_to_wake_up(p, TASK_NORMAL, WF_NO_NOTIFIER, 1); +} +EXPORT_SYMBOL(wake_up_process_no_notif); + int wake_up_state(struct task_struct *p, unsigned int state) { return try_to_wake_up(p, state, 0, 1); @@ -2225,6 +2339,44 @@ void __dl_clear_params(struct task_struct *p) dl_se->dl_yielded = 0; } +#ifdef CONFIG_SCHED_HMP +/* + * sched_exit() - Set EXITING_TASK_MARKER in task's ravg.demand field + * + * Stop accounting (exiting) task's future cpu usage + * + * We need this so that reset_all_windows_stats() can function correctly. + * reset_all_window_stats() depends on do_each_thread/for_each_thread task + * iterators to reset *all* task's statistics. Exiting tasks however become + * invisible to those iterators. sched_exit() is called on a exiting task prior + * to being removed from task_list, which will let reset_all_window_stats() + * function correctly. + */ +void sched_exit(struct task_struct *p) +{ + unsigned long flags; + struct rq *rq; + u64 wallclock; + + sched_set_group_id(p, 0); + + rq = task_rq_lock(p, &flags); + + /* rq->curr == p */ + wallclock = sched_ktime_clock(); + update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); + dequeue_task(rq, p, 0); + reset_task_stats(p); + p->ravg.mark_start = wallclock; + p->ravg.sum_history[0] = EXITING_TASK_MARKER; + + enqueue_task(rq, p, 0); + clear_ed_task(p, rq); + task_rq_unlock(rq, p, &flags); + free_task_load_ptrs(p); +} +#endif /* CONFIG_SCHED_HMP */ + /* * Perform scheduler related setup for a newly forked process p. * p is forked by current. 
@@ -2246,7 +2398,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) #endif INIT_LIST_HEAD(&p->se.group_node); - walt_init_new_task_load(p); #ifdef CONFIG_FAIR_GROUP_SCHED p->se.cfs_rq = NULL; @@ -2335,7 +2486,10 @@ int sysctl_numa_balancing(struct ctl_table *table, int write, int sched_fork(unsigned long clone_flags, struct task_struct *p) { unsigned long flags; - int cpu = get_cpu(); + int cpu; + + init_new_task_load(p); + cpu = get_cpu(); __sched_fork(clone_flags, p); /* @@ -2527,11 +2681,10 @@ void wake_up_new_task(struct task_struct *p) unsigned long flags; struct rq *rq; + add_new_task_to_grp(p); raw_spin_lock_irqsave(&p->pi_lock, flags); p->state = TASK_RUNNING; - walt_init_new_task_load(p); - /* Initialize new task's runnable average */ init_entity_runnable_average(&p->se); #ifdef CONFIG_SMP @@ -2546,10 +2699,9 @@ void wake_up_new_task(struct task_struct *p) __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0, 1)); #endif rq = __task_rq_lock(p); + mark_task_starting(p); update_rq_clock(rq); post_init_entity_util_avg(&p->se); - - walt_mark_task_starting(p); activate_task(rq, p, ENQUEUE_WAKEUP_NEW); p->on_rq = TASK_ON_RQ_QUEUED; trace_sched_wakeup_new(p); @@ -2976,7 +3128,7 @@ void get_iowait_load(unsigned long *nr_waiters, unsigned long *load) *load = rq->load.weight; } -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) /* * sched_exec - execve() is a valuable balancing opportunity, because at @@ -2988,13 +3140,17 @@ void sched_exec(void) unsigned long flags; int dest_cpu, curr_cpu; +#ifdef CONFIG_SCHED_HMP + return; +#endif + raw_spin_lock_irqsave(&p->pi_lock, flags); curr_cpu = task_cpu(p); dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0, 1); if (dest_cpu == smp_processor_id()) goto unlock; - if (likely(cpu_active(dest_cpu))) { + if (likely(cpu_active(dest_cpu) && likely(!cpu_isolated(dest_cpu)))) { struct migration_arg arg = { p, dest_cpu }; raw_spin_unlock_irqrestore(&p->pi_lock, flags); 
@@ -3065,19 +3221,31 @@ void scheduler_tick(void) int cpu = smp_processor_id(); struct rq *rq = cpu_rq(cpu); struct task_struct *curr = rq->curr; + u64 wallclock; + bool early_notif; + u32 old_load; + struct related_thread_group *grp; sched_clock_tick(); raw_spin_lock(&rq->lock); - walt_set_window_start(rq); - walt_update_task_ravg(rq->curr, rq, TASK_UPDATE, - walt_ktime_clock(), 0); + old_load = task_load(curr); + set_window_start(rq); update_rq_clock(rq); curr->sched_class->task_tick(rq, curr, 0); update_cpu_load_active(rq); calc_global_load_tick(rq); + wallclock = sched_ktime_clock(); + update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); + + cpufreq_update_util(rq, 0); + early_notif = early_detection_notify(rq, wallclock); raw_spin_unlock(&rq->lock); + if (early_notif) + atomic_notifier_call_chain(&load_alert_notifier_head, + 0, (void *)(long)cpu); + perf_event_task_tick(); #ifdef CONFIG_SMP @@ -3086,8 +3254,17 @@ void scheduler_tick(void) #endif rq_last_tick_reset(rq); + rcu_read_lock(); + grp = task_related_thread_group(curr); + if (update_preferred_cluster(grp, curr, old_load)) + set_preferred_cluster(grp); + rcu_read_unlock(); + if (curr->sched_class == &fair_sched_class) check_for_migration(rq, curr); + + if (cpu == tick_do_timer_cpu) + core_ctl_check(wallclock); } #ifdef CONFIG_NO_HZ_FULL @@ -3420,16 +3597,20 @@ static void __sched notrace __schedule(bool preempt) update_rq_clock(rq); next = pick_next_task(rq, prev); - wallclock = walt_ktime_clock(); - walt_update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0); - walt_update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0); clear_tsk_need_resched(prev); clear_preempt_need_resched(); rq->clock_skip_update = 0; BUG_ON(task_cpu(next) != cpu_of(rq)); + wallclock = sched_ktime_clock(); if (likely(prev != next)) { + update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0); + update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0); + cpufreq_update_util(rq, 0); + if (!is_idle_task(prev) && !prev->on_rq) 
+ update_avg_burst(prev); + #ifdef CONFIG_SCHED_WALT if (!prev->on_rq) prev->last_sleep_ts = wallclock; @@ -3438,10 +3619,14 @@ static void __sched notrace __schedule(bool preempt) rq->curr = next; ++*switch_count; + set_task_last_switch_out(prev, wallclock); + trace_sched_switch(preempt, prev, next); rq = context_switch(rq, prev, next); /* unlocks the rq */ cpu = cpu_of(rq); } else { + update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0); + cpufreq_update_util(rq, 0); lockdep_unpin_lock(&rq->lock); raw_spin_unlock_irq(&rq->lock); } @@ -4670,6 +4855,8 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) cpumask_var_t cpus_allowed, new_mask; struct task_struct *p; int retval; + int dest_cpu; + cpumask_t allowed_mask; rcu_read_lock(); @@ -4731,20 +4918,26 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) } #endif again: - retval = __set_cpus_allowed_ptr(p, new_mask, true); - - if (!retval) { - cpuset_cpus_allowed(p, cpus_allowed); - if (!cpumask_subset(new_mask, cpus_allowed)) { - /* - * We must have raced with a concurrent cpuset - * update. Just reset the cpus_allowed to the - * cpuset's cpus_allowed - */ - cpumask_copy(new_mask, cpus_allowed); - goto again; + cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask); + dest_cpu = cpumask_any_and(cpu_active_mask, &allowed_mask); + if (dest_cpu < nr_cpu_ids) { + retval = __set_cpus_allowed_ptr(p, new_mask, true); + if (!retval) { + cpuset_cpus_allowed(p, cpus_allowed); + if (!cpumask_subset(new_mask, cpus_allowed)) { + /* + * We must have raced with a concurrent cpuset + * update. 
Just reset the cpus_allowed to the + * cpuset's cpus_allowed + */ + cpumask_copy(new_mask, cpus_allowed); + goto again; + } } + } else { + retval = -EINVAL; } + out_free_new_mask: free_cpumask_var(new_mask); out_free_cpus_allowed: @@ -5501,18 +5694,54 @@ static struct task_struct fake_task = { }; /* - * Migrate all tasks from the rq, sleeping tasks will be migrated by - * try_to_wake_up()->select_task_rq(). + * Remove a task from the runqueue and pretend that it's migrating. This + * should prevent migrations for the detached task and disallow further + * changes to tsk_cpus_allowed. + */ +static void +detach_one_task(struct task_struct *p, struct rq *rq, struct list_head *tasks) +{ + lockdep_assert_held(&rq->lock); + + p->on_rq = TASK_ON_RQ_MIGRATING; + deactivate_task(rq, p, 0); + list_add(&p->se.group_node, tasks); +} + +static void attach_tasks(struct list_head *tasks, struct rq *rq) +{ + struct task_struct *p; + + lockdep_assert_held(&rq->lock); + + while (!list_empty(tasks)) { + p = list_first_entry(tasks, struct task_struct, se.group_node); + list_del_init(&p->se.group_node); + + BUG_ON(task_rq(p) != rq); + activate_task(rq, p, 0); + p->on_rq = TASK_ON_RQ_QUEUED; + } +} + +/* + * Migrate all tasks (not pinned if pinned argument say so) from the rq, + * sleeping tasks will be migrated by try_to_wake_up()->select_task_rq(). * * Called with rq->lock held even though we'er in stop_machine() and * there's no concurrency possible, we hold the required locks anyway * because of lock validation efforts. 
*/ -static void migrate_tasks(struct rq *dead_rq) +static void migrate_tasks(struct rq *dead_rq, bool migrate_pinned_tasks) { struct rq *rq = dead_rq; struct task_struct *next, *stop = rq->stop; int dest_cpu; + unsigned int num_pinned_kthreads = 1; /* this thread */ + LIST_HEAD(tasks); + cpumask_t avail_cpus; + + cpumask_andnot(&avail_cpus, cpu_online_mask, cpu_isolated_mask); /* * Fudge the rq selection such that the below task selection loop @@ -5548,6 +5777,14 @@ static void migrate_tasks(struct rq *dead_rq) BUG_ON(!next); next->sched_class->put_prev_task(rq, next); + if (!migrate_pinned_tasks && next->flags & PF_KTHREAD && + !cpumask_intersects(&avail_cpus, &next->cpus_allowed)) { + detach_one_task(next, rq, &tasks); + num_pinned_kthreads += 1; + lockdep_unpin_lock(&rq->lock); + continue; + } + /* * Rules for changing task_struct::cpus_allowed are holding * both pi_lock and rq->lock, such that holding either @@ -5566,26 +5803,271 @@ static void migrate_tasks(struct rq *dead_rq) * Since we're inside stop-machine, _nothing_ should have * changed the task, WARN if weird stuff happened, because in * that case the above rq->lock drop is a fail too. + * However, during cpu isolation the load balancer might have + * interferred since we don't stop all CPUs. Ignore warning for + * this case. */ - if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { + if (task_rq(next) != rq || !task_on_rq_queued(next)) { + WARN_ON(migrate_pinned_tasks); raw_spin_unlock(&next->pi_lock); continue; } /* Find suitable destination for @next, with force if needed. 
*/ - dest_cpu = select_fallback_rq(dead_rq->cpu, next); + dest_cpu = select_fallback_rq(dead_rq->cpu, next, false); rq = __migrate_task(rq, next, dest_cpu); if (rq != dead_rq) { + raw_spin_unlock(&next->pi_lock); raw_spin_unlock(&rq->lock); + notify_migration(dead_rq->cpu, dest_cpu, true, next); rq = dead_rq; + raw_spin_lock(&next->pi_lock); raw_spin_lock(&rq->lock); } raw_spin_unlock(&next->pi_lock); } rq->stop = stop; + + if (num_pinned_kthreads > 1) + attach_tasks(&tasks, rq); +} + +static void set_rq_online(struct rq *rq); +static void set_rq_offline(struct rq *rq); + +int do_isolation_work_cpu_stop(void *data) +{ + unsigned int cpu = smp_processor_id(); + struct rq *rq = cpu_rq(cpu); + + watchdog_disable(cpu); + + irq_migrate_all_off_this_cpu(); + + local_irq_disable(); + + sched_ttwu_pending(); + + raw_spin_lock(&rq->lock); + + /* + * Temporarily mark the rq as offline. This will allow us to + * move tasks off the CPU. + */ + if (rq->rd) { + BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); + set_rq_offline(rq); + } + + migrate_tasks(rq, false); + + if (rq->rd) + set_rq_online(rq); + raw_spin_unlock(&rq->lock); + + /* + * We might have been in tickless state. Clear NOHZ flags to avoid + * us being kicked for helping out with balancing + */ + nohz_balance_clear_nohz_mask(cpu); + + clear_hmp_request(cpu); + local_irq_enable(); + return 0; +} + +int do_unisolation_work_cpu_stop(void *data) +{ + watchdog_enable(smp_processor_id()); + return 0; +} + +static void init_sched_groups_capacity(int cpu, struct sched_domain *sd); + +static void sched_update_group_capacities(int cpu) +{ + struct sched_domain *sd; + + mutex_lock(&sched_domains_mutex); + rcu_read_lock(); + + for_each_domain(cpu, sd) { + int balance_cpu = group_balance_cpu(sd->groups); + + init_sched_groups_capacity(cpu, sd); + /* + * Need to ensure this is also called with balancing + * cpu. 
+ */ + if (cpu != balance_cpu) + init_sched_groups_capacity(balance_cpu, sd); + } + + rcu_read_unlock(); + mutex_unlock(&sched_domains_mutex); +} + +static unsigned int cpu_isolation_vote[NR_CPUS]; + +int sched_isolate_count(const cpumask_t *mask, bool include_offline) +{ + cpumask_t count_mask = CPU_MASK_NONE; + + if (include_offline) { + cpumask_complement(&count_mask, cpu_online_mask); + cpumask_or(&count_mask, &count_mask, cpu_isolated_mask); + cpumask_and(&count_mask, &count_mask, mask); + } else { + cpumask_and(&count_mask, mask, cpu_isolated_mask); + } + + return cpumask_weight(&count_mask); +} + +/* + * 1) CPU is isolated and cpu is offlined: + * Unisolate the core. + * 2) CPU is not isolated and CPU is offlined: + * No action taken. + * 3) CPU is offline and request to isolate + * Request ignored. + * 4) CPU is offline and isolated: + * Not a possible state. + * 5) CPU is online and request to isolate + * Normal case: Isolate the CPU + * 6) CPU is not isolated and comes back online + * Nothing to do + * + * Note: The client calling sched_isolate_cpu() is repsonsible for ONLY + * calling sched_unisolate_cpu() on a CPU that the client previously isolated. + * Client is also responsible for unisolating when a core goes offline + * (after CPU is marked offline). + */ +int sched_isolate_cpu(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + cpumask_t avail_cpus; + int ret_code = 0; + u64 start_time = 0; + + if (trace_sched_isolate_enabled()) + start_time = sched_clock(); + + cpu_maps_update_begin(); + + cpumask_andnot(&avail_cpus, cpu_online_mask, cpu_isolated_mask); + + /* We cannot isolate ALL cpus in the system */ + if (cpumask_weight(&avail_cpus) == 1) { + ret_code = -EINVAL; + goto out; + } + + if (!cpu_online(cpu)) { + ret_code = -EINVAL; + goto out; + } + + if (++cpu_isolation_vote[cpu] > 1) + goto out; + + /* + * There is a race between watchdog being enabled by hotplug and + * core isolation disabling the watchdog. 
When a CPU is hotplugged in + * and the hotplug lock has been released the watchdog thread might + * not have run yet to enable the watchdog. + * We have to wait for the watchdog to be enabled before proceeding. + */ + if (!watchdog_configured(cpu)) { + msleep(20); + if (!watchdog_configured(cpu)) { + --cpu_isolation_vote[cpu]; + ret_code = -EBUSY; + goto out; + } + } + + set_cpu_isolated(cpu, true); + cpumask_clear_cpu(cpu, &avail_cpus); + + /* Migrate timers */ + smp_call_function_any(&avail_cpus, hrtimer_quiesce_cpu, &cpu, 1); + smp_call_function_any(&avail_cpus, timer_quiesce_cpu, &cpu, 1); + + stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0); + + calc_load_migrate(rq); + update_max_interval(); + sched_update_group_capacities(cpu); + +out: + cpu_maps_update_done(); + trace_sched_isolate(cpu, cpumask_bits(cpu_isolated_mask)[0], + start_time, 1); + return ret_code; +} + +/* + * Note: The client calling sched_isolate_cpu() is repsonsible for ONLY + * calling sched_unisolate_cpu() on a CPU that the client previously isolated. + * Client is also responsible for unisolating when a core goes offline + * (after CPU is marked offline). 
+ */ +int sched_unisolate_cpu_unlocked(int cpu) +{ + int ret_code = 0; + struct rq *rq = cpu_rq(cpu); + u64 start_time = 0; + + if (trace_sched_isolate_enabled()) + start_time = sched_clock(); + + if (!cpu_isolation_vote[cpu]) { + ret_code = -EINVAL; + goto out; + } + + if (--cpu_isolation_vote[cpu]) + goto out; + + if (cpu_online(cpu)) { + unsigned long flags; + + raw_spin_lock_irqsave(&rq->lock, flags); + rq->age_stamp = sched_clock_cpu(cpu); + raw_spin_unlock_irqrestore(&rq->lock, flags); + } + + set_cpu_isolated(cpu, false); + update_max_interval(); + sched_update_group_capacities(cpu); + + if (cpu_online(cpu)) { + stop_cpus(cpumask_of(cpu), do_unisolation_work_cpu_stop, 0); + + /* Kick CPU to immediately do load balancing */ + if (!test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu))) + smp_send_reschedule(cpu); + } + +out: + trace_sched_isolate(cpu, cpumask_bits(cpu_isolated_mask)[0], + start_time, 0); + return ret_code; +} + +int sched_unisolate_cpu(int cpu) +{ + int ret_code; + + cpu_maps_update_begin(); + ret_code = sched_unisolate_cpu_unlocked(cpu); + cpu_maps_update_done(); + return ret_code; } + #endif /* CONFIG_HOTPLUG_CPU */ #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) @@ -5873,7 +6355,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_UP_PREPARE: raw_spin_lock_irqsave(&rq->lock, flags); - walt_set_window_start(rq); + set_window_start(rq); raw_spin_unlock_irqrestore(&rq->lock, flags); rq->calc_load_update = calc_load_update; break; @@ -5894,17 +6376,18 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) sched_ttwu_pending(); /* Update our root-domain */ raw_spin_lock_irqsave(&rq->lock, flags); - walt_migrate_sync_cpu(cpu); + if (rq->rd) { BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); set_rq_offline(rq); } - migrate_tasks(rq); + migrate_tasks(rq, true); BUG_ON(rq->nr_running != 1); /* the migration thread */ raw_spin_unlock_irqrestore(&rq->lock, flags); break; case 
CPU_DEAD: + clear_hmp_request(cpu); calc_load_migrate(rq); break; #endif @@ -6741,11 +7224,14 @@ build_sched_groups(struct sched_domain *sd, int cpu) static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) { struct sched_group *sg = sd->groups; + cpumask_t avail_mask; WARN_ON(!sg); do { - sg->group_weight = cpumask_weight(sched_group_cpus(sg)); + cpumask_andnot(&avail_mask, sched_group_cpus(sg), + cpu_isolated_mask); + sg->group_weight = cpumask_weight(&avail_mask); sg = sg->next; } while (sg != sd->groups); @@ -7545,7 +8031,8 @@ static int build_sched_domains(const struct cpumask *cpu_map, continue; for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent, tl++) { - init_sched_energy(i, sd, tl->energy); + if (energy_aware()) + init_sched_energy(i, sd, tl->energy); claim_allocations(i, sd); init_sched_groups_capacity(i, sd); } @@ -7867,6 +8354,8 @@ void __init sched_init_smp(void) hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); + update_cluster_topology(); + init_hrtick(); /* Move init over to a non-isolated CPU */ @@ -7909,8 +8398,15 @@ void __init sched_init(void) int i, j; unsigned long alloc_size = 0, ptr; +#ifdef CONFIG_SCHED_HMP + pr_info("HMP scheduling enabled.\n"); +#endif + BUG_ON(num_possible_cpus() > BITS_PER_LONG); + sched_boost_parse_dt(); + init_clusters(); + #ifdef CONFIG_FAIR_GROUP_SCHED alloc_size += 2 * nr_cpu_ids * sizeof(void **); #endif @@ -8027,12 +8523,53 @@ void __init sched_init(void) rq->online = 0; rq->idle_stamp = 0; rq->avg_idle = 2*sysctl_sched_migration_cost; - rq->max_idle_balance_cost = sysctl_sched_migration_cost; -#ifdef CONFIG_SCHED_WALT +#ifdef CONFIG_SCHED_HMP + cpumask_set_cpu(i, &rq->freq_domain_cpumask); + rq->hmp_stats.cumulative_runnable_avg = 0; + rq->window_start = 0; + rq->hmp_stats.nr_big_tasks = 0; + rq->hmp_flags = 0; rq->cur_irqload = 0; rq->avg_irqload = 0; rq->irqload_ts = 0; + rq->static_cpu_pwr_cost = 0; + rq->cc.cycles = 1; 
+ rq->cc.time = 1; + rq->cstate = 0; + rq->wakeup_latency = 0; + rq->wakeup_energy = 0; + + /* + * All cpus part of same cluster by default. This avoids the + * need to check for rq->cluster being non-NULL in hot-paths + * like select_best_cpu() + */ + rq->cluster = &init_cluster; + rq->curr_runnable_sum = rq->prev_runnable_sum = 0; + rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0; + memset(&rq->grp_time, 0, sizeof(struct group_cpu_time)); + rq->old_busy_time = 0; + rq->old_estimated_time = 0; + rq->old_busy_time_group = 0; + rq->hmp_stats.pred_demands_sum = 0; + rq->curr_table = 0; + rq->prev_top = 0; + rq->curr_top = 0; + + for (j = 0; j < NUM_TRACKED_WINDOWS; j++) { + memset(&rq->load_subs[j], 0, + sizeof(struct load_subtractions)); + + rq->top_tasks[j] = kcalloc(NUM_LOAD_INDICES, + sizeof(u8), GFP_NOWAIT); + + /* No other choice */ + BUG_ON(!rq->top_tasks[j]); + + clear_top_tasks_bitmap(rq->top_tasks_bitmap[j]); + } #endif + rq->max_idle_balance_cost = sysctl_sched_migration_cost; INIT_LIST_HEAD(&rq->cfs_tasks); @@ -8048,6 +8585,11 @@ void __init sched_init(void) atomic_set(&rq->nr_iowait, 0); } + i = alloc_related_thread_groups(); + BUG_ON(i); + + set_hmp_defaults(); + set_load_weight(&init_task); #ifdef CONFIG_PREEMPT_NOTIFIERS @@ -8072,6 +8614,7 @@ void __init sched_init(void) * when this runqueue becomes "idle". */ init_idle(current, smp_processor_id()); + init_new_task_load(current); calc_load_update = jiffies + LOAD_FREQ; @@ -8765,7 +9308,7 @@ int sched_rr_handler(struct ctl_table *table, int write, #ifdef CONFIG_CGROUP_SCHED -static inline struct task_group *css_tg(struct cgroup_subsys_state *css) +inline struct task_group *css_tg(struct cgroup_subsys_state *css) { return css ? 
container_of(css, struct task_group, css) : NULL; } @@ -9165,6 +9708,13 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, #endif /* CONFIG_RT_GROUP_SCHED */ static struct cftype cpu_files[] = { +#ifdef CONFIG_SCHED_HMP + { + .name = "upmigrate_discourage", + .read_u64 = cpu_upmigrate_discourage_read_u64, + .write_u64 = cpu_upmigrate_discourage_write_u64, + }, +#endif #ifdef CONFIG_FAIR_GROUP_SCHED { .name = "shares", diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c new file mode 100644 index 000000000000..2f060a570061 --- /dev/null +++ b/kernel/sched/core_ctl.c @@ -0,0 +1,1171 @@ +/* Copyright (c) 2014-2017, 2020 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "core_ctl: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "sched.h" + +#define MAX_CPUS_PER_CLUSTER 4 +#define MAX_CLUSTERS 2 + +struct cluster_data { + bool inited; + unsigned int min_cpus; + unsigned int max_cpus; + unsigned int offline_delay_ms; + unsigned int busy_up_thres[MAX_CPUS_PER_CLUSTER]; + unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER]; + unsigned int active_cpus; + unsigned int num_cpus; + unsigned int nr_isolated_cpus; + cpumask_t cpu_mask; + unsigned int need_cpus; + unsigned int task_thres; + unsigned int max_nr; + s64 need_ts; + struct list_head lru; + bool pending; + spinlock_t pending_lock; + bool is_big_cluster; + bool enable; + int nrrun; + bool nrrun_changed; + struct task_struct *core_ctl_thread; + unsigned int first_cpu; + unsigned int boost; + struct kobject kobj; +}; + +struct cpu_data { + bool is_busy; + unsigned int busy; + unsigned int cpu; + bool not_preferred; + struct cluster_data *cluster; + struct list_head sib; + bool isolated_by_us; + unsigned int max_nr; +}; + +static DEFINE_PER_CPU(struct cpu_data, cpu_state); +static struct cluster_data cluster_state[MAX_CLUSTERS]; +static unsigned int num_clusters; + +#define for_each_cluster(cluster, idx) \ + for (; (idx) < num_clusters && ((cluster) = &cluster_state[idx]);\ + (idx)++) + +static DEFINE_SPINLOCK(state_lock); +static void apply_need(struct cluster_data *state); +static void wake_up_core_ctl_thread(struct cluster_data *state); +static bool initialized; + +static unsigned int get_active_cpu_count(const struct cluster_data *cluster); + +/* ========================= sysfs interface =========================== */ + +static ssize_t store_min_cpus(struct cluster_data *state, + const char *buf, size_t count) +{ + unsigned int val; + + if (sscanf(buf, "%u\n", &val) != 1) + return -EINVAL; + + state->min_cpus = min(val, state->max_cpus); + wake_up_core_ctl_thread(state); + + return count; +} + 
+static ssize_t show_min_cpus(const struct cluster_data *state, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", state->min_cpus); +} + +static ssize_t store_max_cpus(struct cluster_data *state, + const char *buf, size_t count) +{ + unsigned int val; + + if (sscanf(buf, "%u\n", &val) != 1) + return -EINVAL; + + val = min(val, state->num_cpus); + state->max_cpus = val; + state->min_cpus = min(state->min_cpus, state->max_cpus); + wake_up_core_ctl_thread(state); + + return count; +} + +static ssize_t show_max_cpus(const struct cluster_data *state, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", state->max_cpus); +} + +static ssize_t store_offline_delay_ms(struct cluster_data *state, + const char *buf, size_t count) +{ + unsigned int val; + + if (sscanf(buf, "%u\n", &val) != 1) + return -EINVAL; + + state->offline_delay_ms = val; + apply_need(state); + + return count; +} + +static ssize_t show_task_thres(const struct cluster_data *state, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", state->task_thres); +} + +static ssize_t store_task_thres(struct cluster_data *state, + const char *buf, size_t count) +{ + unsigned int val; + + if (sscanf(buf, "%u\n", &val) != 1) + return -EINVAL; + + if (val < state->num_cpus) + return -EINVAL; + + state->task_thres = val; + apply_need(state); + + return count; +} + +static ssize_t show_offline_delay_ms(const struct cluster_data *state, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", state->offline_delay_ms); +} + +static ssize_t store_busy_up_thres(struct cluster_data *state, + const char *buf, size_t count) +{ + unsigned int val[MAX_CPUS_PER_CLUSTER]; + int ret, i; + + ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]); + if (ret != 1 && ret != state->num_cpus) + return -EINVAL; + + if (ret == 1) { + for (i = 0; i < state->num_cpus; i++) + state->busy_up_thres[i] = val[0]; + } else { + for (i = 0; i < state->num_cpus; i++) + state->busy_up_thres[i] = val[i]; + } + 
apply_need(state); + return count; +} + +static ssize_t show_busy_up_thres(const struct cluster_data *state, char *buf) +{ + int i, count = 0; + + for (i = 0; i < state->num_cpus; i++) + count += snprintf(buf + count, PAGE_SIZE - count, "%u ", + state->busy_up_thres[i]); + + count += snprintf(buf + count, PAGE_SIZE - count, "\n"); + return count; +} + +static ssize_t store_busy_down_thres(struct cluster_data *state, + const char *buf, size_t count) +{ + unsigned int val[MAX_CPUS_PER_CLUSTER]; + int ret, i; + + ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]); + if (ret != 1 && ret != state->num_cpus) + return -EINVAL; + + if (ret == 1) { + for (i = 0; i < state->num_cpus; i++) + state->busy_down_thres[i] = val[0]; + } else { + for (i = 0; i < state->num_cpus; i++) + state->busy_down_thres[i] = val[i]; + } + apply_need(state); + return count; +} + +static ssize_t show_busy_down_thres(const struct cluster_data *state, char *buf) +{ + int i, count = 0; + + for (i = 0; i < state->num_cpus; i++) + count += snprintf(buf + count, PAGE_SIZE - count, "%u ", + state->busy_down_thres[i]); + + count += snprintf(buf + count, PAGE_SIZE - count, "\n"); + return count; +} + +static ssize_t store_is_big_cluster(struct cluster_data *state, + const char *buf, size_t count) +{ + unsigned int val; + + if (sscanf(buf, "%u\n", &val) != 1) + return -EINVAL; + + state->is_big_cluster = val ? 
1 : 0; + return count; +} + +static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster); +} + +static ssize_t store_enable(struct cluster_data *state, + const char *buf, size_t count) +{ + unsigned int val; + bool bval; + + if (sscanf(buf, "%u\n", &val) != 1) + return -EINVAL; + + bval = !!val; + if (bval != state->enable) { + state->enable = bval; + apply_need(state); + } + + return count; +} + +static ssize_t show_enable(const struct cluster_data *state, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%u\n", state->enable); +} + +static ssize_t show_need_cpus(const struct cluster_data *state, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus); +} + +static ssize_t show_active_cpus(const struct cluster_data *state, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", state->active_cpus); +} + +static ssize_t show_global_state(const struct cluster_data *state, char *buf) +{ + struct cpu_data *c; + struct cluster_data *cluster; + ssize_t count = 0; + unsigned int cpu; + + spin_lock_irq(&state_lock); + for_each_possible_cpu(cpu) { + c = &per_cpu(cpu_state, cpu); + cluster = c->cluster; + if (!cluster || !cluster->inited) + continue; + + count += snprintf(buf + count, PAGE_SIZE - count, + "CPU%u\n", cpu); + count += snprintf(buf + count, PAGE_SIZE - count, + "\tCPU: %u\n", c->cpu); + count += snprintf(buf + count, PAGE_SIZE - count, + "\tOnline: %u\n", + cpu_online(c->cpu)); + count += snprintf(buf + count, PAGE_SIZE - count, + "\tIsolated: %u\n", + cpu_isolated(c->cpu)); + count += snprintf(buf + count, PAGE_SIZE - count, + "\tFirst CPU: %u\n", + cluster->first_cpu); + count += snprintf(buf + count, PAGE_SIZE - count, + "\tBusy%%: %u\n", c->busy); + count += snprintf(buf + count, PAGE_SIZE - count, + "\tIs busy: %u\n", c->is_busy); + count += snprintf(buf + count, PAGE_SIZE - count, + "\tNot preferred: %u\n", + c->not_preferred); + count += 
snprintf(buf + count, PAGE_SIZE - count, + "\tNr running: %u\n", cluster->nrrun); + count += snprintf(buf + count, PAGE_SIZE - count, + "\tActive CPUs: %u\n", get_active_cpu_count(cluster)); + count += snprintf(buf + count, PAGE_SIZE - count, + "\tNeed CPUs: %u\n", cluster->need_cpus); + count += snprintf(buf + count, PAGE_SIZE - count, + "\tNr isolated CPUs: %u\n", + cluster->nr_isolated_cpus); + count += snprintf(buf + count, PAGE_SIZE - count, + "\tBoost: %u\n", (unsigned int) cluster->boost); + } + spin_unlock_irq(&state_lock); + + return count; +} + +static ssize_t store_not_preferred(struct cluster_data *state, + const char *buf, size_t count) +{ + struct cpu_data *c; + unsigned int i; + unsigned int val[MAX_CPUS_PER_CLUSTER]; + unsigned long flags; + int ret; + + ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]); + if (ret != state->num_cpus) + return -EINVAL; + + spin_lock_irqsave(&state_lock, flags); + for (i = 0; i < state->num_cpus; i++) { + c = &per_cpu(cpu_state, i + state->first_cpu); + c->not_preferred = val[i]; + } + spin_unlock_irqrestore(&state_lock, flags); + + return count; +} + +static ssize_t show_not_preferred(const struct cluster_data *state, char *buf) +{ + struct cpu_data *c; + ssize_t count = 0; + unsigned long flags; + int i; + + spin_lock_irqsave(&state_lock, flags); + for (i = 0; i < state->num_cpus; i++) { + c = &per_cpu(cpu_state, i + state->first_cpu); + count += scnprintf(buf + count, PAGE_SIZE - count, + "CPU#%d: %u\n", c->cpu, c->not_preferred); + } + spin_unlock_irqrestore(&state_lock, flags); + + return count; +} + + +struct core_ctl_attr { + struct attribute attr; + ssize_t (*show)(const struct cluster_data *, char *); + ssize_t (*store)(struct cluster_data *, const char *, size_t count); +}; + +#define core_ctl_attr_ro(_name) \ +static struct core_ctl_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +#define core_ctl_attr_rw(_name) \ +static struct core_ctl_attr _name = \ +__ATTR(_name, 0644, 
show_##_name, store_##_name) + +core_ctl_attr_rw(min_cpus); +core_ctl_attr_rw(max_cpus); +core_ctl_attr_rw(offline_delay_ms); +core_ctl_attr_rw(busy_up_thres); +core_ctl_attr_rw(busy_down_thres); +core_ctl_attr_rw(task_thres); +core_ctl_attr_rw(is_big_cluster); +core_ctl_attr_ro(need_cpus); +core_ctl_attr_ro(active_cpus); +core_ctl_attr_ro(global_state); +core_ctl_attr_rw(not_preferred); +core_ctl_attr_rw(enable); + +static struct attribute *default_attrs[] = { + &min_cpus.attr, + &max_cpus.attr, + &offline_delay_ms.attr, + &busy_up_thres.attr, + &busy_down_thres.attr, + &task_thres.attr, + &is_big_cluster.attr, + &enable.attr, + &need_cpus.attr, + &active_cpus.attr, + &global_state.attr, + ¬_preferred.attr, + NULL +}; + +#define to_cluster_data(k) container_of(k, struct cluster_data, kobj) +#define to_attr(a) container_of(a, struct core_ctl_attr, attr) +static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) +{ + struct cluster_data *data = to_cluster_data(kobj); + struct core_ctl_attr *cattr = to_attr(attr); + ssize_t ret = -EIO; + + if (cattr->show) + ret = cattr->show(data, buf); + + return ret; +} + +static ssize_t store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct cluster_data *data = to_cluster_data(kobj); + struct core_ctl_attr *cattr = to_attr(attr); + ssize_t ret = -EIO; + + if (cattr->store) + ret = cattr->store(data, buf, count); + + return ret; +} + +static const struct sysfs_ops sysfs_ops = { + .show = show, + .store = store, +}; + +static struct kobj_type ktype_core_ctl = { + .sysfs_ops = &sysfs_ops, + .default_attrs = default_attrs, +}; + +/* ==================== runqueue based core count =================== */ + +#define RQ_AVG_TOLERANCE 2 +#define RQ_AVG_DEFAULT_MS 20 +static unsigned int rq_avg_period_ms = RQ_AVG_DEFAULT_MS; + +static s64 rq_avg_timestamp_ms; + +static void update_running_avg(bool trigger_update) +{ + int avg, iowait_avg, big_avg, old_nrrun; + int old_max_nr, 
max_nr, big_max_nr; + s64 now; + unsigned long flags; + struct cluster_data *cluster; + unsigned int index = 0; + + spin_lock_irqsave(&state_lock, flags); + + now = ktime_to_ms(ktime_get()); + if (now - rq_avg_timestamp_ms < rq_avg_period_ms - RQ_AVG_TOLERANCE) { + spin_unlock_irqrestore(&state_lock, flags); + return; + } + rq_avg_timestamp_ms = now; + sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg, + &max_nr, &big_max_nr); + + spin_unlock_irqrestore(&state_lock, flags); + + for_each_cluster(cluster, index) { + if (!cluster->inited) + continue; + + old_nrrun = cluster->nrrun; + old_max_nr = cluster->max_nr; + cluster->nrrun = cluster->is_big_cluster ? big_avg : avg; + cluster->max_nr = cluster->is_big_cluster ? big_max_nr : max_nr; + + if (cluster->nrrun != old_nrrun || + cluster->max_nr != old_max_nr) { + + if (trigger_update) + apply_need(cluster); + else + cluster->nrrun_changed = true; + } + } + return; +} + +#define MAX_NR_THRESHOLD 4 +/* adjust needed CPUs based on current runqueue information */ +static unsigned int apply_task_need(const struct cluster_data *cluster, + unsigned int new_need) +{ + /* unisolate all cores if there are enough tasks */ + if (cluster->nrrun >= cluster->task_thres) + return cluster->num_cpus; + + /* only unisolate more cores if there are tasks to run */ + if (cluster->nrrun > new_need) + new_need = new_need + 1; + + /* + * We don't want tasks to be overcrowded in a cluster. + * If any CPU has more than MAX_NR_THRESHOLD in the last + * window, bring another CPU to help out. 
+ */ + if (cluster->max_nr > MAX_NR_THRESHOLD) + new_need = new_need + 1; + + return new_need; +} + +/* ======================= load based core count ====================== */ + +static unsigned int apply_limits(const struct cluster_data *cluster, + unsigned int need_cpus) +{ + return min(max(cluster->min_cpus, need_cpus), cluster->max_cpus); +} + +static unsigned int get_active_cpu_count(const struct cluster_data *cluster) +{ + return cluster->num_cpus - + sched_isolate_count(&cluster->cpu_mask, true); +} + +static bool is_active(const struct cpu_data *state) +{ + return cpu_online(state->cpu) && !cpu_isolated(state->cpu); +} + +static bool adjustment_possible(const struct cluster_data *cluster, + unsigned int need) +{ + return (need < cluster->active_cpus || (need > cluster->active_cpus && + cluster->nr_isolated_cpus)); +} + +static bool eval_need(struct cluster_data *cluster) +{ + unsigned long flags; + struct cpu_data *c; + unsigned int need_cpus = 0, last_need, thres_idx; + int ret = 0; + bool need_flag = false; + unsigned int new_need; + s64 now, elapsed; + + if (unlikely(!cluster->inited)) + return 0; + + spin_lock_irqsave(&state_lock, flags); + + if (cluster->boost || !cluster->enable) { + need_cpus = cluster->max_cpus; + } else { + cluster->active_cpus = get_active_cpu_count(cluster); + thres_idx = cluster->active_cpus ? 
cluster->active_cpus - 1 : 0; + list_for_each_entry(c, &cluster->lru, sib) { + if (c->busy >= cluster->busy_up_thres[thres_idx] || + sched_cpu_high_irqload(c->cpu)) + c->is_busy = true; + else if (c->busy < cluster->busy_down_thres[thres_idx]) + c->is_busy = false; + need_cpus += c->is_busy; + } + need_cpus = apply_task_need(cluster, need_cpus); + } + new_need = apply_limits(cluster, need_cpus); + need_flag = adjustment_possible(cluster, new_need); + + last_need = cluster->need_cpus; + now = ktime_to_ms(ktime_get()); + + if (new_need > cluster->active_cpus) { + ret = 1; + } else { + if (new_need == last_need) { + cluster->need_ts = now; + spin_unlock_irqrestore(&state_lock, flags); + return 0; + } + + elapsed = now - cluster->need_ts; + ret = elapsed >= cluster->offline_delay_ms; + } + + if (ret) { + cluster->need_ts = now; + cluster->need_cpus = new_need; + } + trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need, + ret && need_flag); + spin_unlock_irqrestore(&state_lock, flags); + + return ret && need_flag; +} + +static void apply_need(struct cluster_data *cluster) +{ + if (eval_need(cluster)) + wake_up_core_ctl_thread(cluster); +} + +static int core_ctl_set_busy(unsigned int cpu, unsigned int busy) +{ + struct cpu_data *c = &per_cpu(cpu_state, cpu); + struct cluster_data *cluster = c->cluster; + unsigned int old_is_busy = c->is_busy; + + if (!cluster || !cluster->inited) + return 0; + + update_running_avg(false); + if (c->busy == busy && !cluster->nrrun_changed) + return 0; + c->busy = busy; + cluster->nrrun_changed = false; + + apply_need(cluster); + trace_core_ctl_set_busy(cpu, busy, old_is_busy, c->is_busy); + return 0; +} + +/* ========================= core count enforcement ==================== */ + +static void wake_up_core_ctl_thread(struct cluster_data *cluster) +{ + unsigned long flags; + + spin_lock_irqsave(&cluster->pending_lock, flags); + cluster->pending = true; + spin_unlock_irqrestore(&cluster->pending_lock, flags); + + 
wake_up_process_no_notif(cluster->core_ctl_thread); +} + +static u64 core_ctl_check_timestamp; +static u64 core_ctl_check_interval; + +static bool do_check(u64 wallclock) +{ + bool do_check = false; + unsigned long flags; + + spin_lock_irqsave(&state_lock, flags); + if ((wallclock - core_ctl_check_timestamp) >= core_ctl_check_interval) { + core_ctl_check_timestamp = wallclock; + do_check = true; + } + spin_unlock_irqrestore(&state_lock, flags); + return do_check; +} + +int core_ctl_set_boost(bool boost) +{ + unsigned int index = 0; + struct cluster_data *cluster; + unsigned long flags; + int ret = 0; + bool boost_state_changed = false; + + if (unlikely(!initialized)) + return 0; + + spin_lock_irqsave(&state_lock, flags); + for_each_cluster(cluster, index) { + if (cluster->is_big_cluster) { + if (boost) { + boost_state_changed = !cluster->boost; + ++cluster->boost; + } else { + if (!cluster->boost) { + pr_err("Error turning off boost. Boost already turned off\n"); + ret = -EINVAL; + } else { + --cluster->boost; + boost_state_changed = !cluster->boost; + } + } + break; + } + } + spin_unlock_irqrestore(&state_lock, flags); + + if (boost_state_changed) + apply_need(cluster); + + trace_core_ctl_set_boost(cluster->boost, ret); + + return ret; +} +EXPORT_SYMBOL(core_ctl_set_boost); + +void core_ctl_check(u64 wallclock) +{ + if (unlikely(!initialized)) + return; + + if (do_check(wallclock)) { + unsigned int index = 0; + struct cluster_data *cluster; + + update_running_avg(true); + + for_each_cluster(cluster, index) { + if (eval_need(cluster)) + wake_up_core_ctl_thread(cluster); + } + } +} + +static void move_cpu_lru(struct cpu_data *cpu_data) +{ + unsigned long flags; + + spin_lock_irqsave(&state_lock, flags); + list_del(&cpu_data->sib); + list_add_tail(&cpu_data->sib, &cpu_data->cluster->lru); + spin_unlock_irqrestore(&state_lock, flags); +} + +static void try_to_isolate(struct cluster_data *cluster, unsigned int need) +{ + struct cpu_data *c, *tmp; + unsigned long flags; 
+ unsigned int num_cpus = cluster->num_cpus; + unsigned int nr_isolated = 0; + + /* + * Protect against entry being removed (and added at tail) by other + * thread (hotplug). + */ + spin_lock_irqsave(&state_lock, flags); + list_for_each_entry_safe(c, tmp, &cluster->lru, sib) { + if (!num_cpus--) + break; + + if (!is_active(c)) + continue; + if (cluster->active_cpus == need) + break; + /* Don't offline busy CPUs. */ + if (c->is_busy) + continue; + + spin_unlock_irqrestore(&state_lock, flags); + + pr_debug("Trying to isolate CPU%u\n", c->cpu); + if (!sched_isolate_cpu(c->cpu)) { + c->isolated_by_us = true; + move_cpu_lru(c); + nr_isolated++; + } else { + pr_debug("Unable to isolate CPU%u\n", c->cpu); + } + cluster->active_cpus = get_active_cpu_count(cluster); + spin_lock_irqsave(&state_lock, flags); + } + cluster->nr_isolated_cpus += nr_isolated; + spin_unlock_irqrestore(&state_lock, flags); + + /* + * If the number of active CPUs is within the limits, then + * don't force isolation of any busy CPUs. 
+ */ + if (cluster->active_cpus <= cluster->max_cpus) + return; + + nr_isolated = 0; + num_cpus = cluster->num_cpus; + spin_lock_irqsave(&state_lock, flags); + list_for_each_entry_safe(c, tmp, &cluster->lru, sib) { + if (!num_cpus--) + break; + + if (!is_active(c)) + continue; + if (cluster->active_cpus <= cluster->max_cpus) + break; + + spin_unlock_irqrestore(&state_lock, flags); + + pr_debug("Trying to isolate CPU%u\n", c->cpu); + if (!sched_isolate_cpu(c->cpu)) { + c->isolated_by_us = true; + move_cpu_lru(c); + nr_isolated++; + } else { + pr_debug("Unable to isolate CPU%u\n", c->cpu); + } + cluster->active_cpus = get_active_cpu_count(cluster); + spin_lock_irqsave(&state_lock, flags); + } + cluster->nr_isolated_cpus += nr_isolated; + spin_unlock_irqrestore(&state_lock, flags); + +} + +static void __try_to_unisolate(struct cluster_data *cluster, + unsigned int need, bool force) +{ + struct cpu_data *c, *tmp; + unsigned long flags; + unsigned int num_cpus = cluster->num_cpus; + unsigned int nr_unisolated = 0; + + /* + * Protect against entry being removed (and added at tail) by other + * thread (hotplug). 
+ */ + spin_lock_irqsave(&state_lock, flags); + list_for_each_entry_safe(c, tmp, &cluster->lru, sib) { + if (!num_cpus--) + break; + + if (!c->isolated_by_us) + continue; + if ((cpu_online(c->cpu) && !cpu_isolated(c->cpu)) || + (!force && c->not_preferred)) + continue; + if (cluster->active_cpus == need) + break; + + spin_unlock_irqrestore(&state_lock, flags); + + pr_debug("Trying to unisolate CPU%u\n", c->cpu); + if (!sched_unisolate_cpu(c->cpu)) { + c->isolated_by_us = false; + move_cpu_lru(c); + nr_unisolated++; + } else { + pr_debug("Unable to unisolate CPU%u\n", c->cpu); + } + cluster->active_cpus = get_active_cpu_count(cluster); + spin_lock_irqsave(&state_lock, flags); + } + cluster->nr_isolated_cpus -= nr_unisolated; + spin_unlock_irqrestore(&state_lock, flags); +} + +static void try_to_unisolate(struct cluster_data *cluster, unsigned int need) +{ + bool force_use_non_preferred = false; + + __try_to_unisolate(cluster, need, force_use_non_preferred); + + if (cluster->active_cpus == need) + return; + + force_use_non_preferred = true; + __try_to_unisolate(cluster, need, force_use_non_preferred); +} + +static void __ref do_core_ctl(struct cluster_data *cluster) +{ + unsigned int need; + + need = apply_limits(cluster, cluster->need_cpus); + + if (adjustment_possible(cluster, need)) { + pr_debug("Trying to adjust group %u from %u to %u\n", + cluster->first_cpu, cluster->active_cpus, need); + + if (cluster->active_cpus > need) + try_to_isolate(cluster, need); + else if (cluster->active_cpus < need) + try_to_unisolate(cluster, need); + } +} + +static int __ref try_core_ctl(void *data) +{ + struct cluster_data *cluster = data; + unsigned long flags; + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + spin_lock_irqsave(&cluster->pending_lock, flags); + if (!cluster->pending) { + spin_unlock_irqrestore(&cluster->pending_lock, flags); + schedule(); + if (kthread_should_stop()) + break; + spin_lock_irqsave(&cluster->pending_lock, flags); + } + 
set_current_state(TASK_RUNNING); + cluster->pending = false; + spin_unlock_irqrestore(&cluster->pending_lock, flags); + + do_core_ctl(cluster); + } + + return 0; +} + +static int __ref cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + uint32_t cpu = (uintptr_t)hcpu; + struct cpu_data *state = &per_cpu(cpu_state, cpu); + struct cluster_data *cluster = state->cluster; + unsigned int need; + bool do_wakeup, unisolated = false; + unsigned long flags; + + if (unlikely(!cluster || !cluster->inited)) + return NOTIFY_DONE; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: + cluster->active_cpus = get_active_cpu_count(cluster); + + /* + * Moving to the end of the list should only happen in + * CPU_ONLINE and not on CPU_UP_PREPARE to prevent an + * infinite list traversal when thermal (or other entities) + * reject trying to online CPUs. + */ + move_cpu_lru(state); + break; + + case CPU_DEAD: + /* + * We don't want to have a CPU both offline and isolated. + * So unisolate a CPU that went down if it was isolated by us. + */ + if (state->isolated_by_us) { + sched_unisolate_cpu_unlocked(cpu); + state->isolated_by_us = false; + unisolated = true; + } + + /* Move a CPU to the end of the LRU when it goes offline. 
*/ + move_cpu_lru(state); + + state->busy = 0; + cluster->active_cpus = get_active_cpu_count(cluster); + break; + default: + return NOTIFY_DONE; + } + + need = apply_limits(cluster, cluster->need_cpus); + spin_lock_irqsave(&state_lock, flags); + if (unisolated) + cluster->nr_isolated_cpus--; + do_wakeup = adjustment_possible(cluster, need); + spin_unlock_irqrestore(&state_lock, flags); + if (do_wakeup) + wake_up_core_ctl_thread(cluster); + + return NOTIFY_OK; +} + +static struct notifier_block __refdata cpu_notifier = { + .notifier_call = cpu_callback, +}; + +/* ============================ init code ============================== */ + +static cpumask_var_t core_ctl_disable_cpumask; +static bool core_ctl_disable_cpumask_present; + +static int __init core_ctl_disable_setup(char *str) +{ + if (!*str) + return -EINVAL; + + alloc_bootmem_cpumask_var(&core_ctl_disable_cpumask); + + if (cpulist_parse(str, core_ctl_disable_cpumask) < 0) { + free_bootmem_cpumask_var(core_ctl_disable_cpumask); + return -EINVAL; + } + + core_ctl_disable_cpumask_present = true; + pr_info("disable_cpumask=%*pbl\n", + cpumask_pr_args(core_ctl_disable_cpumask)); + + return 0; +} +early_param("core_ctl_disable_cpumask", core_ctl_disable_setup); + +static bool should_skip(const struct cpumask *mask) +{ + if (!core_ctl_disable_cpumask_present) + return false; + + /* + * We operate on a cluster basis. 
Disable the core_ctl for + * a cluster, if all of it's cpus are specified in + * core_ctl_disable_cpumask + */ + return cpumask_subset(mask, core_ctl_disable_cpumask); +} + +static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu) +{ + unsigned int i; + + for (i = 0; i < num_clusters; ++i) { + if (cluster_state[i].first_cpu == first_cpu) + return &cluster_state[i]; + } + + return NULL; +} + +static int cluster_init(const struct cpumask *mask) +{ + struct device *dev; + unsigned int first_cpu = cpumask_first(mask); + struct cluster_data *cluster; + struct cpu_data *state; + unsigned int cpu; + struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; + + if (should_skip(mask)) + return 0; + + if (find_cluster_by_first_cpu(first_cpu)) + return 0; + + dev = get_cpu_device(first_cpu); + if (!dev) + return -ENODEV; + + pr_info("Creating CPU group %d\n", first_cpu); + + if (num_clusters == MAX_CLUSTERS) { + pr_err("Unsupported number of clusters. Only %u supported\n", + MAX_CLUSTERS); + return -EINVAL; + } + cluster = &cluster_state[num_clusters]; + ++num_clusters; + + cpumask_copy(&cluster->cpu_mask, mask); + cluster->num_cpus = cpumask_weight(mask); + if (cluster->num_cpus > MAX_CPUS_PER_CLUSTER) { + pr_err("HW configuration not supported\n"); + return -EINVAL; + } + cluster->first_cpu = first_cpu; + cluster->min_cpus = 1; + cluster->max_cpus = cluster->num_cpus; + cluster->need_cpus = cluster->num_cpus; + cluster->offline_delay_ms = 100; + cluster->task_thres = UINT_MAX; + cluster->nrrun = cluster->num_cpus; + cluster->enable = true; + INIT_LIST_HEAD(&cluster->lru); + spin_lock_init(&cluster->pending_lock); + + for_each_cpu(cpu, mask) { + pr_info("Init CPU%u state\n", cpu); + + state = &per_cpu(cpu_state, cpu); + state->cluster = cluster; + state->cpu = cpu; + list_add_tail(&state->sib, &cluster->lru); + } + cluster->active_cpus = get_active_cpu_count(cluster); + + cluster->core_ctl_thread = kthread_run(try_core_ctl, (void *) cluster, + 
"core_ctl/%d", first_cpu); + if (IS_ERR(cluster->core_ctl_thread)) + return PTR_ERR(cluster->core_ctl_thread); + + sched_setscheduler_nocheck(cluster->core_ctl_thread, SCHED_FIFO, + ¶m); + + cluster->inited = true; + + kobject_init(&cluster->kobj, &ktype_core_ctl); + return kobject_add(&cluster->kobj, &dev->kobj, "core_ctl"); +} + +static int cpufreq_policy_cb(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_policy *policy = data; + int ret; + + switch (val) { + case CPUFREQ_CREATE_POLICY: + ret = cluster_init(policy->related_cpus); + if (ret) + pr_warn("unable to create core ctl group: %d\n", ret); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block cpufreq_pol_nb = { + .notifier_call = cpufreq_policy_cb, +}; + +static int cpufreq_gov_cb(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cpufreq_govinfo *info = data; + + switch (val) { + case CPUFREQ_LOAD_CHANGE: + core_ctl_set_busy(info->cpu, info->load); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block cpufreq_gov_nb = { + .notifier_call = cpufreq_gov_cb, +}; + +static int __init core_ctl_init(void) +{ + unsigned int cpu; + + if (should_skip(cpu_possible_mask)) + return 0; + + core_ctl_check_interval = (rq_avg_period_ms - RQ_AVG_TOLERANCE) + * NSEC_PER_MSEC; + + register_cpu_notifier(&cpu_notifier); + cpufreq_register_notifier(&cpufreq_pol_nb, CPUFREQ_POLICY_NOTIFIER); + cpufreq_register_notifier(&cpufreq_gov_nb, CPUFREQ_GOVINFO_NOTIFIER); + + cpu_maps_update_begin(); + for_each_online_cpu(cpu) { + struct cpufreq_policy *policy; + int ret; + + policy = cpufreq_cpu_get(cpu); + if (policy) { + ret = cluster_init(policy->related_cpus); + if (ret) + pr_warn("unable to create core ctl group: %d\n" + , ret); + cpufreq_cpu_put(policy); + } + } + cpu_maps_update_done(); + initialized = true; + return 0; +} + +late_initcall(core_ctl_init); diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 
040d8e8b6c33..6c84b4d28914 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -33,7 +33,6 @@ struct sugov_tunables { struct gov_attr_set attr_set; unsigned int up_rate_limit_us; unsigned int down_rate_limit_us; - bool iowait_boost_enable; }; struct sugov_policy { @@ -219,7 +218,7 @@ static void sugov_get_util(unsigned long *util, unsigned long *max, u64 time) rt = (rt * max_cap) >> SCHED_CAPACITY_SHIFT; *util = boosted_cpu_util(cpu); - if (use_pelt()) + if (likely(use_pelt())) *util = *util + rt; *util = min(*util, max_cap); @@ -229,21 +228,6 @@ static void sugov_get_util(unsigned long *util, unsigned long *max, u64 time) static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, unsigned int flags) { - struct sugov_policy *sg_policy = sg_cpu->sg_policy; - - if (!sg_policy->tunables->iowait_boost_enable) - return; - - /* Clear iowait_boost if the CPU apprears to have been idle. */ - if (sg_cpu->iowait_boost) { - s64 delta_ns = time - sg_cpu->last_update; - - if (delta_ns > TICK_NSEC) { - sg_cpu->iowait_boost = 0; - sg_cpu->iowait_boost_pending = false; - } - } - if (flags & SCHED_CPUFREQ_IOWAIT) { if (sg_cpu->iowait_boost_pending) return; @@ -257,6 +241,14 @@ static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, } else { sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min; } + } else if (sg_cpu->iowait_boost) { + s64 delta_ns = time - sg_cpu->last_update; + + /* Clear iowait_boost if the CPU apprears to have been idle. 
*/ + if (delta_ns > TICK_NSEC) { + sg_cpu->iowait_boost = 0; + sg_cpu->iowait_boost_pending = false; + } } } @@ -518,36 +510,12 @@ static ssize_t down_rate_limit_us_store(struct gov_attr_set *attr_set, return count; } -static ssize_t iowait_boost_enable_show(struct gov_attr_set *attr_set, - char *buf) -{ - struct sugov_tunables *tunables = to_sugov_tunables(attr_set); - - return sprintf(buf, "%u\n", tunables->iowait_boost_enable); -} - -static ssize_t iowait_boost_enable_store(struct gov_attr_set *attr_set, - const char *buf, size_t count) -{ - struct sugov_tunables *tunables = to_sugov_tunables(attr_set); - bool enable; - - if (kstrtobool(buf, &enable)) - return -EINVAL; - - tunables->iowait_boost_enable = enable; - - return count; -} - static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us); static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us); -static struct governor_attr iowait_boost_enable = __ATTR_RW(iowait_boost_enable); static struct attribute *sugov_attributes[] = { &up_rate_limit_us.attr, &down_rate_limit_us.attr, - &iowait_boost_enable.attr, NULL }; @@ -707,8 +675,6 @@ static int sugov_init(struct cpufreq_policy *policy) } } - tunables->iowait_boost_enable = policy->iowait_boost_enable; - policy->governor_data = sg_policy; sg_policy->tunables = tunables; diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index fc2cfd6b2941..e6ec68c15aa3 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -6,7 +6,6 @@ #include #include #include "sched.h" -#include "walt.h" #ifdef CONFIG_IRQ_TIME_ACCOUNTING @@ -51,10 +50,8 @@ void irqtime_account_irq(struct task_struct *curr) unsigned long flags; s64 delta; int cpu; -#ifdef CONFIG_SCHED_WALT u64 wallclock; bool account = true; -#endif if (!sched_clock_irqtime) return; @@ -62,10 +59,8 @@ void irqtime_account_irq(struct task_struct *curr) local_irq_save(flags); cpu = smp_processor_id(); -#ifdef CONFIG_SCHED_WALT wallclock = sched_clock_cpu(cpu); -#endif - 
delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); + delta = wallclock - __this_cpu_read(irq_start_time); __this_cpu_add(irq_start_time, delta); irq_time_write_begin(); @@ -79,16 +74,16 @@ void irqtime_account_irq(struct task_struct *curr) __this_cpu_add(cpu_hardirq_time, delta); else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) __this_cpu_add(cpu_softirq_time, delta); -#ifdef CONFIG_SCHED_WALT else account = false; -#endif irq_time_write_end(); -#ifdef CONFIG_SCHED_WALT + if (account) - walt_account_irqtime(cpu, curr, delta, wallclock); -#endif + sched_account_irqtime(cpu, curr, delta, wallclock); + else if (curr != this_cpu_ksoftirqd()) + sched_account_irqstart(cpu, curr, wallclock); + local_irq_restore(flags); } EXPORT_SYMBOL_GPL(irqtime_account_irq); diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 7d43214c1f7e..188c8388a63f 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -994,6 +994,41 @@ static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} #endif /* CONFIG_SMP */ +#ifdef CONFIG_SCHED_HMP + +static void +inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) +{ + inc_cumulative_runnable_avg(&rq->hmp_stats, p); +} + +static void +dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) +{ + dec_cumulative_runnable_avg(&rq->hmp_stats, p); +} + +static void +fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p, + u32 new_task_load, u32 new_pred_demand) +{ + s64 task_load_delta = (s64)new_task_load - task_load(p); + s64 pred_demand_delta = PRED_DEMAND_DELTA; + + fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta, + pred_demand_delta); +} + +#else /* CONFIG_SCHED_HMP */ + +static inline void +inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { } + +static inline void +dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { } + +#endif /* CONFIG_SCHED_HMP */ + static inline void inc_dl_tasks(struct sched_dl_entity *dl_se, struct 
dl_rq *dl_rq) { @@ -1003,7 +1038,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) WARN_ON(!dl_prio(prio)); dl_rq->dl_nr_running++; add_nr_running(rq_of_dl_rq(dl_rq), 1); - walt_inc_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se)); + inc_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se)); inc_dl_deadline(dl_rq, deadline); inc_dl_migration(dl_se, dl_rq); @@ -1018,7 +1053,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) WARN_ON(!dl_rq->dl_nr_running); dl_rq->dl_nr_running--; sub_nr_running(rq_of_dl_rq(dl_rq), 1); - walt_dec_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se)); + dec_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se)); dec_dl_deadline(dl_rq, dl_se->deadline); dec_dl_migration(dl_se, dl_rq); @@ -2018,6 +2053,11 @@ const struct sched_class dl_sched_class = { .switched_to = switched_to_dl, .update_curr = update_curr_dl, +#ifdef CONFIG_SCHED_HMP + .inc_hmp_sched_stats = inc_hmp_sched_stats_dl, + .dec_hmp_sched_stats = dec_hmp_sched_stats_dl, + .fixup_hmp_sched_stats = fixup_hmp_sched_stats_dl, +#endif }; #ifdef CONFIG_SCHED_DEBUG diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index f2e043438b8a..ed8e6bb4531b 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -227,6 +227,14 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) cfs_rq->throttled); SEQ_printf(m, " .%-30s: %d\n", "throttle_count", cfs_rq->throttle_count); + SEQ_printf(m, " .%-30s: %d\n", "runtime_enabled", + cfs_rq->runtime_enabled); +#ifdef CONFIG_SCHED_HMP + SEQ_printf(m, " .%-30s: %d\n", "nr_big_tasks", + cfs_rq->hmp_stats.nr_big_tasks); + SEQ_printf(m, " .%-30s: %llu\n", "cumulative_runnable_avg", + cfs_rq->hmp_stats.cumulative_runnable_avg); +#endif #endif #ifdef CONFIG_FAIR_GROUP_SCHED @@ -306,6 +314,23 @@ do { \ P(cpu_load[2]); P(cpu_load[3]); P(cpu_load[4]); +#ifdef CONFIG_SMP + P(cpu_capacity); +#endif +#ifdef CONFIG_SCHED_HMP + P(static_cpu_pwr_cost); + 
P(cluster->static_cluster_pwr_cost); + P(cluster->load_scale_factor); + P(cluster->capacity); + P(cluster->max_possible_capacity); + P(cluster->efficiency); + P(cluster->cur_freq); + P(cluster->max_freq); + P(cluster->exec_scale_factor); + P(hmp_stats.nr_big_tasks); + SEQ_printf(m, " .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg", + rq->hmp_stats.cumulative_runnable_avg); +#endif #undef P #undef PN @@ -386,6 +411,15 @@ static void sched_debug_header(struct seq_file *m) PN(sysctl_sched_wakeup_granularity); P(sysctl_sched_child_runs_first); P(sysctl_sched_features); +#ifdef CONFIG_SCHED_HMP + P(sched_upmigrate); + P(sched_downmigrate); + P(sched_init_task_load_windows); + P(min_capacity); + P(max_capacity); + P(sched_ravg_window); + P(sched_load_granule); +#endif #undef PN #undef P @@ -549,6 +583,9 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m) void proc_sched_show_task(struct task_struct *p, struct seq_file *m) { unsigned long nr_switches; + unsigned int load_avg; + + load_avg = pct_task_load(p); SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p), get_nr_threads(p)); @@ -626,6 +663,13 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) P(se.statistics.nr_wakeups_cas_attempts); P(se.statistics.nr_wakeups_cas_count); +#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED) + __P(load_avg); +#ifdef CONFIG_SCHED_HMP + P(ravg.demand); +#endif +#endif + { u64 avg_atom, avg_per_cpu; diff --git a/kernel/sched/energy.c b/kernel/sched/energy.c index b0656b7a93e3..50d183b1e156 100644 --- a/kernel/sched/energy.c +++ b/kernel/sched/energy.c @@ -27,7 +27,10 @@ #include #include +#include "sched.h" + struct sched_group_energy *sge_array[NR_CPUS][NR_SD_LEVELS]; +bool sched_energy_aware; static void free_resources(void) { @@ -56,6 +59,13 @@ void init_sched_energy_costs(void) int sd_level, i, nstates, cpu; const __be32 *val; + if (!energy_aware()) { + sched_energy_aware = false; + return; + } + + 
sched_energy_aware = true; + for_each_possible_cpu(cpu) { cn = of_get_cpu_node(cpu, NULL); if (!cn) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ec4006ec02f8..dead84667be4 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -19,6 +19,11 @@ * Adaptive scheduling granularity, math enhancements by Peter Zijlstra * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2015 Sony Mobile Communications Inc, + * and licensed under the license of the file. + */ #include #include @@ -55,12 +60,6 @@ unsigned int normalized_sysctl_sched_latency = 6000000ULL; unsigned int sysctl_sched_sync_hint_enable = 1; unsigned int sysctl_sched_cstate_aware = 1; -#ifdef CONFIG_SCHED_WALT -unsigned int sysctl_sched_use_walt_cpu_util = 1; -unsigned int sysctl_sched_use_walt_task_util = 1; -__read_mostly unsigned int sysctl_sched_walt_cpu_high_irqload = - (10 * NSEC_PER_MSEC); -#endif /* * The initial- and re-scaling of tunables is configurable * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)) @@ -2661,25 +2660,28 @@ static inline void update_cfs_shares(struct sched_entity *se) } #endif /* CONFIG_FAIR_GROUP_SCHED */ +#ifdef CONFIG_SMP u32 sched_get_wake_up_idle(struct task_struct *p) { - return 0; + u32 enabled = p->flags & PF_WAKE_UP_IDLE; + + return !!enabled; } EXPORT_SYMBOL(sched_get_wake_up_idle); int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle) { - return 0; -} -EXPORT_SYMBOL(sched_set_wake_up_idle); + int enable = !!wake_up_idle; + + if (enable) + p->flags |= PF_WAKE_UP_IDLE; + else + p->flags &= ~PF_WAKE_UP_IDLE; -int core_ctl_set_boost(bool boost) -{ return 0; } -EXPORT_SYMBOL(core_ctl_set_boost); +EXPORT_SYMBOL(sched_set_wake_up_idle); -#ifdef CONFIG_SMP static const u32 runnable_avg_yN_inv[] = { 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6, 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 
0xce248c14, 0xc9b9bd85, @@ -2759,229 +2761,1323 @@ static u32 __compute_runnable_contrib(u64 n) return contrib + runnable_avg_yN_sum[n]; } -#if (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) != 10 || SCHED_CAPACITY_SHIFT != 10 -#error "load tracking assumes 2^10 as unit" -#endif +#ifdef CONFIG_SCHED_HMP + +/* CPU selection flag */ +#define SBC_FLAG_PREV_CPU 0x1 +#define SBC_FLAG_BEST_CAP_CPU 0x2 +#define SBC_FLAG_CPU_COST 0x4 +#define SBC_FLAG_MIN_COST 0x8 +#define SBC_FLAG_IDLE_LEAST_LOADED 0x10 +#define SBC_FLAG_IDLE_CSTATE 0x20 +#define SBC_FLAG_COST_CSTATE_TIE_BREAKER 0x40 +#define SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER 0x80 +#define SBC_FLAG_CSTATE_LOAD 0x100 +#define SBC_FLAG_BEST_SIBLING 0x200 +#define SBC_FLAG_WAKER_CPU 0x400 +#define SBC_FLAG_PACK_TASK 0x800 +#define SBC_FLAG_SKIP_RT_TASK 0x1000 + +/* Cluster selection flag */ +#define SBC_FLAG_COLOC_CLUSTER 0x10000 +#define SBC_FLAG_WAKER_CLUSTER 0x20000 +#define SBC_FLAG_BACKUP_CLUSTER 0x40000 +#define SBC_FLAG_BOOST_CLUSTER 0x80000 + +struct cpu_select_env { + struct task_struct *p; + struct related_thread_group *rtg; + u8 reason; + u8 need_idle:1; + u8 need_waker_cluster:1; + u8 sync:1; + enum sched_boost_policy boost_policy; + u8 pack_task:1; + int prev_cpu; + DECLARE_BITMAP(candidate_list, NR_CPUS); + DECLARE_BITMAP(backup_list, NR_CPUS); + u64 task_load; + u64 cpu_load; + u32 sbc_best_flag; + u32 sbc_best_cluster_flag; + struct cpumask search_cpus; +}; -#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) +struct cluster_cpu_stats { + int best_idle_cpu, least_loaded_cpu; + int best_capacity_cpu, best_cpu, best_sibling_cpu; + int min_cost, best_sibling_cpu_cost; + int best_cpu_wakeup_latency; + u64 min_load, best_load, best_sibling_cpu_load; + s64 highest_spare_capacity; +}; /* - * We can represent the historical contribution to runnable average as the - * coefficients of a geometric series. 
To do this we sub-divide our runnable - * history into segments of approximately 1ms (1024us); label the segment that - * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g. - * - * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ... - * p0 p1 p2 - * (now) (~1ms ago) (~2ms ago) - * - * Let u_i denote the fraction of p_i that the entity was runnable. + * Should task be woken to any available idle cpu? * - * We then designate the fractions u_i as our co-efficients, yielding the - * following representation of historical load: - * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ... - * - * We choose y based on the with of a reasonably scheduling period, fixing: - * y^32 = 0.5 - * - * This means that the contribution to load ~32ms ago (u_32) will be weighted - * approximately half as much as the contribution to load within the last ms - * (u_0). - * - * When a period "rolls over" and we have new u_0`, multiplying the previous - * sum again by y is sufficient to update: - * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... ) - * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}] + * Waking tasks to idle cpu has mixed implications on both performance and + * power. In many cases, scheduler can't estimate correctly impact of using idle + * cpus on either performance or power. PF_WAKE_UP_IDLE allows external kernel + * module to pass a strong hint to scheduler that the task in question should be + * woken to idle cpu, generally to improve performance. 
*/ -static __always_inline int -__update_load_avg(u64 now, int cpu, struct sched_avg *sa, - unsigned long weight, int running, struct cfs_rq *cfs_rq) +static inline int wake_to_idle(struct task_struct *p) { - u64 delta, scaled_delta, periods; - u32 contrib; - unsigned int delta_w, scaled_delta_w, decayed = 0; - unsigned long scale_freq, scale_cpu; + return (current->flags & PF_WAKE_UP_IDLE) || + (p->flags & PF_WAKE_UP_IDLE); +} - delta = now - sa->last_update_time; - /* - * This should only happen when time goes backwards, which it - * unfortunately does during sched clock init when we swap over to TSC. - */ - if ((s64)delta < 0) { - sa->last_update_time = now; +static int spill_threshold_crossed(struct cpu_select_env *env, struct rq *rq) +{ + u64 total_load; + + total_load = env->task_load + env->cpu_load; + + if (total_load > sched_spill_load || + (rq->nr_running + 1) > sysctl_sched_spill_nr_run) + return 1; + + return 0; +} + +static int skip_cpu(int cpu, struct cpu_select_env *env) +{ + int tcpu = task_cpu(env->p); + int skip = 0; + + if (!env->reason) return 0; + + if (is_reserved(cpu)) + return 1; + + switch (env->reason) { + case UP_MIGRATION: + skip = !idle_cpu(cpu); + break; + case IRQLOAD_MIGRATION: + /* Purposely fall through */ + default: + skip = (cpu == tcpu); + break; } - /* - * Use 1024ns as the unit of measurement since it's a reasonable - * approximation of 1us and fast to compute. 
- */ - delta >>= 10; - if (!delta) - return 0; - sa->last_update_time = now; + return skip; +} - scale_freq = arch_scale_freq_capacity(NULL, cpu); - scale_cpu = arch_scale_cpu_capacity(NULL, cpu); - trace_sched_contrib_scale_f(cpu, scale_freq, scale_cpu); +static inline int +acceptable_capacity(struct sched_cluster *cluster, struct cpu_select_env *env) +{ + int tcpu; - /* delta_w is the amount already accumulated against our next period */ - delta_w = sa->period_contrib; - if (delta + delta_w >= 1024) { - decayed = 1; + if (!env->reason) + return 1; - /* how much left for next period will start over, we don't know yet */ - sa->period_contrib = 0; + tcpu = task_cpu(env->p); + switch (env->reason) { + case UP_MIGRATION: + return cluster->capacity > cpu_capacity(tcpu); - /* - * Now that we know we're crossing a period boundary, figure - * out how much from delta we need to complete the current - * period and accrue it. - */ - delta_w = 1024 - delta_w; - scaled_delta_w = cap_scale(delta_w, scale_freq); - if (weight) { - sa->load_sum += weight * scaled_delta_w; - if (cfs_rq) { - cfs_rq->runnable_load_sum += - weight * scaled_delta_w; - } - } - if (running) - sa->util_sum += scaled_delta_w * scale_cpu; + case DOWN_MIGRATION: + return cluster->capacity < cpu_capacity(tcpu); - delta -= delta_w; + default: + break; + } - /* Figure out how many additional periods this update spans */ - periods = delta / 1024; - delta %= 1024; + return 1; +} - sa->load_sum = decay_load(sa->load_sum, periods + 1); - if (cfs_rq) { - cfs_rq->runnable_load_sum = - decay_load(cfs_rq->runnable_load_sum, periods + 1); - } - sa->util_sum = decay_load((u64)(sa->util_sum), periods + 1); +static int +skip_cluster(struct sched_cluster *cluster, struct cpu_select_env *env) +{ + if (!test_bit(cluster->id, env->candidate_list)) + return 1; - /* Efficiently calculate \sum (1..n_period) 1024*y^i */ - contrib = __compute_runnable_contrib(periods); - contrib = cap_scale(contrib, scale_freq); - if (weight) { - 
sa->load_sum += weight * contrib; - if (cfs_rq) - cfs_rq->runnable_load_sum += weight * contrib; - } - if (running) - sa->util_sum += contrib * scale_cpu; + if (!acceptable_capacity(cluster, env)) { + __clear_bit(cluster->id, env->candidate_list); + return 1; } - /* Remainder of delta accrued against u_0` */ - scaled_delta = cap_scale(delta, scale_freq); - if (weight) { - sa->load_sum += weight * scaled_delta; - if (cfs_rq) - cfs_rq->runnable_load_sum += weight * scaled_delta; + return 0; +} + +static struct sched_cluster * +select_least_power_cluster(struct cpu_select_env *env) +{ + struct sched_cluster *cluster; + + if (env->rtg) { + int cpu = cluster_first_cpu(env->rtg->preferred_cluster); + + env->task_load = scale_load_to_cpu(task_load(env->p), cpu); + + if (task_load_will_fit(env->p, env->task_load, + cpu, env->boost_policy)) { + env->sbc_best_cluster_flag |= SBC_FLAG_COLOC_CLUSTER; + + if (env->boost_policy == SCHED_BOOST_NONE) + return env->rtg->preferred_cluster; + + for_each_sched_cluster(cluster) { + if (cluster != env->rtg->preferred_cluster) { + __set_bit(cluster->id, + env->backup_list); + __clear_bit(cluster->id, + env->candidate_list); + } + } + + return env->rtg->preferred_cluster; + } + + /* + * Since the task load does not fit on the preferred + * cluster anymore, pretend that the task does not + * have any preferred cluster. This allows the waking + * task to get the appropriate CPU it needs as per the + * non co-location placement policy without having to + * wait until the preferred cluster is updated. 
+ */ + env->rtg = NULL; } - if (running) - sa->util_sum += scaled_delta * scale_cpu; + for_each_sched_cluster(cluster) { + if (!skip_cluster(cluster, env)) { + int cpu = cluster_first_cpu(cluster); - sa->period_contrib += delta; + env->task_load = scale_load_to_cpu(task_load(env->p), + cpu); + if (task_load_will_fit(env->p, env->task_load, cpu, + env->boost_policy)) + return cluster; - if (decayed) { - sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX); - if (cfs_rq) { - cfs_rq->runnable_load_avg = - div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX); + __set_bit(cluster->id, env->backup_list); + __clear_bit(cluster->id, env->candidate_list); } - sa->util_avg = sa->util_sum / LOAD_AVG_MAX; } - return decayed; + return NULL; } -/* - * Signed add and clamp on underflow. - * - * Explicitly do a load-store to ensure the intermediate value never hits - * memory. This allows lockless observations without ever seeing the negative - * values. - */ -#define add_positive(_ptr, _val) do { \ - typeof(_ptr) ptr = (_ptr); \ - typeof(_val) val = (_val); \ - typeof(*ptr) res, var = READ_ONCE(*ptr); \ - \ - res = var + val; \ - \ - if (val < 0 && res > var) \ - res = 0; \ - \ - WRITE_ONCE(*ptr, res); \ -} while (0) - -#ifdef CONFIG_FAIR_GROUP_SCHED -/** - * update_tg_load_avg - update the tg's load avg - * @cfs_rq: the cfs_rq whose avg changed - * @force: update regardless of how small the difference - * - * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load. - * However, because tg->load_avg is a global value there are performance - * considerations. - * - * In order to avoid having to look at the other cfs_rq's, we use a - * differential update where we store the last value we propagated. This in - * turn allows skipping updates if the differential is 'small'. - * - * Updating tg's load_avg is necessary before update_cfs_share() (which is - * done) and effective_load() (which is not done because it is too costly). 
- */ -static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) +static struct sched_cluster * +next_candidate(const unsigned long *list, int start, int end) { - long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; + int cluster_id; - /* - * No need to update load_avg for root_task_group as it is not used. - */ - if (cfs_rq->tg == &root_task_group) - return; + cluster_id = find_next_bit(list, end, start - 1 + 1); + if (cluster_id >= end) + return NULL; - if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { - atomic_long_add(delta, &cfs_rq->tg->load_avg); - cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; + return sched_cluster[cluster_id]; +} + +static void +update_spare_capacity(struct cluster_cpu_stats *stats, + struct cpu_select_env *env, int cpu, int capacity, + u64 cpu_load) +{ + s64 spare_capacity = sched_ravg_window - cpu_load; + + if (spare_capacity > 0 && + (spare_capacity > stats->highest_spare_capacity || + (spare_capacity == stats->highest_spare_capacity && + ((!env->need_waker_cluster && + capacity > cpu_capacity(stats->best_capacity_cpu)) || + (env->need_waker_cluster && + cpu_rq(cpu)->nr_running < + cpu_rq(stats->best_capacity_cpu)->nr_running))))) { + /* + * If sync waker is the only runnable of CPU, cr_avg of the + * CPU is 0 so we have high chance to place the wakee on the + * waker's CPU which likely causes preemtion of the waker. + * This can lead migration of preempted waker. Place the + * wakee on the real idle CPU when it's possible by checking + * nr_running to avoid such preemption. + */ + stats->highest_spare_capacity = spare_capacity; + stats->best_capacity_cpu = cpu; } } -/* - * Called within set_task_rq() right before setting a task's cpu. The - * caller only guarantees p->pi_lock is held; no other assumptions, - * including the state of rq->lock, should be made. 
- */ -void set_task_rq_fair(struct sched_entity *se, - struct cfs_rq *prev, struct cfs_rq *next) +static inline void find_backup_cluster( +struct cpu_select_env *env, struct cluster_cpu_stats *stats) { - if (!sched_feat(ATTACH_AGE_LOAD)) - return; + struct sched_cluster *next = NULL; + int i; + struct cpumask search_cpus; - /* - * We are supposed to update the task to "current" time, then its up to - * date and ready to go to new CPU/cfs_rq. But we have difficulty in - * getting what current time is, so simply throw away the out-of-date - * time. This will result in the wakee task is less decayed, but giving - * the wakee more load sounds not bad. - */ - if (se->avg.last_update_time && prev) { - u64 p_last_update_time; - u64 n_last_update_time; + extern int num_clusters; -#ifndef CONFIG_64BIT - u64 p_last_update_time_copy; - u64 n_last_update_time_copy; + while (!bitmap_empty(env->backup_list, num_clusters)) { + next = next_candidate(env->backup_list, 0, num_clusters); + __clear_bit(next->id, env->backup_list); - do { - p_last_update_time_copy = prev->load_last_update_time_copy; - n_last_update_time_copy = next->load_last_update_time_copy; + cpumask_and(&search_cpus, &env->search_cpus, &next->cpus); + for_each_cpu(i, &search_cpus) { + if (trace_sched_cpu_load_wakeup_enabled()) { + trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i), + sched_irqload(i), power_cost(i, task_load(env->p) + + cpu_cravg_sync(i, env->sync)), 0); + } + update_spare_capacity(stats, env, i, next->capacity, + cpu_load_sync(i, env->sync)); + } + env->sbc_best_cluster_flag = SBC_FLAG_BACKUP_CLUSTER; + } +} + +struct sched_cluster * +next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env, + struct cluster_cpu_stats *stats) +{ + struct sched_cluster *next = NULL; + + extern int num_clusters; + + __clear_bit(cluster->id, env->candidate_list); + + if (env->rtg && preferred_cluster(cluster, env->p)) + return NULL; + + do { + if (bitmap_empty(env->candidate_list, num_clusters)) + 
return NULL; + + next = next_candidate(env->candidate_list, 0, num_clusters); + if (next) { + if (next->min_power_cost > stats->min_cost) { + clear_bit(next->id, env->candidate_list); + next = NULL; + continue; + } + + if (skip_cluster(next, env)) + next = NULL; + } + } while (!next); + + env->task_load = scale_load_to_cpu(task_load(env->p), + cluster_first_cpu(next)); + return next; +} + +/* + * Returns true, if a current task has RT/DL class: + * SCHED_FIFO + SCHED_RR + SCHED_DEADLINE + */ +static inline int +is_current_high_prio_class_task(int cpu) +{ + struct task_struct *curr = READ_ONCE(cpu_rq(cpu)->curr); + return (task_has_rt_policy(curr) | task_has_dl_policy(curr)); +} + +#ifdef CONFIG_SCHED_HMP_CSTATE_AWARE +static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats, + struct cpu_select_env *env, int cpu_cost) +{ + int wakeup_latency; + int prev_cpu = env->prev_cpu; + + wakeup_latency = cpu_rq(cpu)->wakeup_latency; + + if (env->need_idle) { + stats->min_cost = cpu_cost; + if (idle_cpu(cpu)) { + if (wakeup_latency < stats->best_cpu_wakeup_latency || + (wakeup_latency == stats->best_cpu_wakeup_latency && + cpu == prev_cpu)) { + stats->best_idle_cpu = cpu; + stats->best_cpu_wakeup_latency = wakeup_latency; + } + } else { + if (env->cpu_load < stats->min_load || + (env->cpu_load == stats->min_load && + cpu == prev_cpu)) { + stats->least_loaded_cpu = cpu; + stats->min_load = env->cpu_load; + } + } + + return; + } + + if (cpu_cost < stats->min_cost) { + stats->min_cost = cpu_cost; + stats->best_cpu_wakeup_latency = wakeup_latency; + stats->best_load = env->cpu_load; + stats->best_cpu = cpu; + env->sbc_best_flag = SBC_FLAG_CPU_COST; + return; + } + + /* + * We try to escape of selecting CPUs with running RT + * class tasks, if a power coast is the same. A reason + * is to reduce a latency, since RT task may not be + * preempted for a long time. 
+ */ + if (is_current_high_prio_class_task(stats->best_cpu) && + !is_current_high_prio_class_task(cpu)) { + stats->best_cpu_wakeup_latency = wakeup_latency; + stats->best_load = env->cpu_load; + stats->best_cpu = cpu; + env->sbc_best_flag = SBC_FLAG_SKIP_RT_TASK; + return; + } + + if (!is_current_high_prio_class_task(stats->best_cpu) && + is_current_high_prio_class_task(cpu)) + return; + + /* CPU cost is the same. Start breaking the tie by C-state */ + + if (wakeup_latency > stats->best_cpu_wakeup_latency) + return; + + if (wakeup_latency < stats->best_cpu_wakeup_latency) { + stats->best_cpu_wakeup_latency = wakeup_latency; + stats->best_load = env->cpu_load; + stats->best_cpu = cpu; + env->sbc_best_flag = SBC_FLAG_COST_CSTATE_TIE_BREAKER; + return; + } + + /* C-state is the same. Use prev CPU to break the tie */ + if (cpu == prev_cpu) { + stats->best_cpu = cpu; + env->sbc_best_flag = SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER; + return; + } + + if (stats->best_cpu != prev_cpu && + ((wakeup_latency == 0 && env->cpu_load < stats->best_load) || + (wakeup_latency > 0 && env->cpu_load > stats->best_load))) { + stats->best_load = env->cpu_load; + stats->best_cpu = cpu; + env->sbc_best_flag = SBC_FLAG_CSTATE_LOAD; + } +} +#else /* CONFIG_SCHED_HMP_CSTATE_AWARE */ +static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats, + struct cpu_select_env *env, int cpu_cost) +{ + int prev_cpu = env->prev_cpu; + + if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) { + if (stats->best_sibling_cpu_cost > cpu_cost || + (stats->best_sibling_cpu_cost == cpu_cost && + stats->best_sibling_cpu_load > env->cpu_load)) { + stats->best_sibling_cpu_cost = cpu_cost; + stats->best_sibling_cpu_load = env->cpu_load; + stats->best_sibling_cpu = cpu; + } + } + + if ((cpu_cost < stats->min_cost) || + ((stats->best_cpu != prev_cpu && + stats->min_load > env->cpu_load) || cpu == prev_cpu)) { + if (env->need_idle) { + if (idle_cpu(cpu)) { + stats->min_cost = cpu_cost; + 
stats->best_idle_cpu = cpu; + } + } else { + stats->min_cost = cpu_cost; + stats->min_load = env->cpu_load; + stats->best_cpu = cpu; + env->sbc_best_flag = SBC_FLAG_MIN_COST; + } + } +} +#endif /* CONFIG_SCHED_HMP_CSTATE_AWARE */ + +static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats, + struct cpu_select_env *env) +{ + int cpu_cost; + + /* + * We try to find the least loaded *busy* CPU irrespective + * of the power cost. + */ + if (env->pack_task) + cpu_cost = cpu_min_power_cost(cpu); + + else + cpu_cost = power_cost(cpu, task_load(env->p) + + cpu_cravg_sync(cpu, env->sync)); + + if (cpu_cost <= stats->min_cost) + __update_cluster_stats(cpu, stats, env, cpu_cost); +} + +static void find_best_cpu_in_cluster(struct sched_cluster *c, + struct cpu_select_env *env, struct cluster_cpu_stats *stats) +{ + int i; + struct cpumask search_cpus; + + cpumask_and(&search_cpus, &env->search_cpus, &c->cpus); + + env->need_idle = wake_to_idle(env->p) || c->wake_up_idle; + + for_each_cpu(i, &search_cpus) { + env->cpu_load = cpu_load_sync(i, env->sync); + if (trace_sched_cpu_load_wakeup_enabled()) { + trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i), + sched_irqload(i), + power_cost(i, task_load(env->p) + + cpu_cravg_sync(i, env->sync)), 0); + } + if (skip_cpu(i, env)) + continue; + + update_spare_capacity(stats, env, i, c->capacity, + env->cpu_load); + + /* + * need_idle takes precedence over sched boost but when both + * are set, idlest CPU with in all the clusters is selected + * when boost_policy = BOOST_ON_ALL whereas idlest CPU in the + * big cluster is selected within boost_policy = BOOST_ON_BIG. 
+ */ + if ((!env->need_idle && + env->boost_policy != SCHED_BOOST_NONE) || + env->need_waker_cluster || + sched_cpu_high_irqload(i) || + spill_threshold_crossed(env, cpu_rq(i))) + continue; + + update_cluster_stats(i, stats, env); + } +} + +static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats) +{ + stats->best_cpu = stats->best_idle_cpu = -1; + stats->best_capacity_cpu = stats->best_sibling_cpu = -1; + stats->min_cost = stats->best_sibling_cpu_cost = INT_MAX; + stats->min_load = stats->best_sibling_cpu_load = ULLONG_MAX; + stats->highest_spare_capacity = 0; + stats->least_loaded_cpu = -1; + stats->best_cpu_wakeup_latency = INT_MAX; + /* No need to initialize stats->best_load */ +} + +static inline bool env_has_special_flags(struct cpu_select_env *env) +{ + if (env->need_idle || env->boost_policy != SCHED_BOOST_NONE || + env->reason) + return true; + + return false; +} + +static inline bool +bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats) +{ + int prev_cpu; + struct task_struct *task = env->p; + struct sched_cluster *cluster; + + if (!task->ravg.mark_start || !sched_short_sleep_task_threshold) + return false; + + prev_cpu = env->prev_cpu; + if (!cpumask_test_cpu(prev_cpu, &env->search_cpus)) + return false; + + if (task->ravg.mark_start - task->last_cpu_selected_ts >= + sched_long_cpu_selection_threshold) + return false; + + /* + * This function should be used by task wake up path only as it's + * assuming p->last_switch_out_ts as last sleep time. + * p->last_switch_out_ts can denote last preemption time as well as + * last sleep time. 
+ */ + if (task->ravg.mark_start - task->last_switch_out_ts >= + sched_short_sleep_task_threshold) + return false; + + env->task_load = scale_load_to_cpu(task_load(task), prev_cpu); + cluster = cpu_rq(prev_cpu)->cluster; + + if (!task_load_will_fit(task, env->task_load, prev_cpu, + sched_boost_policy())) { + + __set_bit(cluster->id, env->backup_list); + __clear_bit(cluster->id, env->candidate_list); + return false; + } + + env->cpu_load = cpu_load_sync(prev_cpu, env->sync); + if (sched_cpu_high_irqload(prev_cpu) || + spill_threshold_crossed(env, cpu_rq(prev_cpu))) { + update_spare_capacity(stats, env, prev_cpu, + cluster->capacity, env->cpu_load); + cpumask_clear_cpu(prev_cpu, &env->search_cpus); + return false; + } + + return true; +} + +static inline bool +wake_to_waker_cluster(struct cpu_select_env *env) +{ + return env->sync && + task_load(current) > sched_big_waker_task_load && + task_load(env->p) < sched_small_wakee_task_load; +} + +static inline bool +bias_to_waker_cpu(struct cpu_select_env *env, int cpu) +{ + return sysctl_sched_prefer_sync_wakee_to_waker && + cpu_rq(cpu)->nr_running == 1 && + cpumask_test_cpu(cpu, &env->search_cpus); +} + +static inline int +cluster_allowed(struct cpu_select_env *env, struct sched_cluster *cluster) +{ + return cpumask_intersects(&env->search_cpus, &cluster->cpus); +} + +/* return cheapest cpu that can fit this task */ +static int select_best_cpu(struct task_struct *p, int target, int reason, + int sync) +{ + struct sched_cluster *cluster, *pref_cluster = NULL; + struct cluster_cpu_stats stats; + struct related_thread_group *grp; + unsigned int sbc_flag = 0; + int cpu = raw_smp_processor_id(); + bool special; + + struct cpu_select_env env = { + .p = p, + .reason = reason, + .need_idle = wake_to_idle(p), + .need_waker_cluster = 0, + .sync = sync, + .prev_cpu = target, + .rtg = NULL, + .sbc_best_flag = 0, + .sbc_best_cluster_flag = 0, + .pack_task = false, + }; + + env.boost_policy = task_sched_boost(p) ? 
+ sched_boost_policy() : SCHED_BOOST_NONE; + + bitmap_copy(env.candidate_list, all_cluster_ids, NR_CPUS); + bitmap_zero(env.backup_list, NR_CPUS); + + cpumask_and(&env.search_cpus, tsk_cpus_allowed(p), cpu_active_mask); + cpumask_andnot(&env.search_cpus, &env.search_cpus, cpu_isolated_mask); + + init_cluster_cpu_stats(&stats); + special = env_has_special_flags(&env); + + rcu_read_lock(); + + grp = task_related_thread_group(p); + + if (grp && grp->preferred_cluster) { + pref_cluster = grp->preferred_cluster; + if (!cluster_allowed(&env, pref_cluster)) + clear_bit(pref_cluster->id, env.candidate_list); + else + env.rtg = grp; + } else if (!special) { + cluster = cpu_rq(cpu)->cluster; + if (wake_to_waker_cluster(&env)) { + if (bias_to_waker_cpu(&env, cpu)) { + target = cpu; + sbc_flag = SBC_FLAG_WAKER_CLUSTER | + SBC_FLAG_WAKER_CPU; + goto out; + } else if (cluster_allowed(&env, cluster)) { + env.need_waker_cluster = 1; + bitmap_zero(env.candidate_list, NR_CPUS); + __set_bit(cluster->id, env.candidate_list); + env.sbc_best_cluster_flag = + SBC_FLAG_WAKER_CLUSTER; + } + } else if (bias_to_prev_cpu(&env, &stats)) { + sbc_flag = SBC_FLAG_PREV_CPU; + goto out; + } + } + + if (!special && is_short_burst_task(p)) { + env.pack_task = true; + sbc_flag = SBC_FLAG_PACK_TASK; + } +retry: + cluster = select_least_power_cluster(&env); + + if (!cluster) + goto out; + + /* + * 'cluster' now points to the minimum power cluster which can satisfy + * task's perf goals. Walk down the cluster list starting with that + * cluster. 
For non-small tasks, skip clusters that don't have + * mostly_idle/idle cpus + */ + + do { + find_best_cpu_in_cluster(cluster, &env, &stats); + + } while ((cluster = next_best_cluster(cluster, &env, &stats))); + + if (env.need_idle) { + if (stats.best_idle_cpu >= 0) { + target = stats.best_idle_cpu; + sbc_flag |= SBC_FLAG_IDLE_CSTATE; + } else if (stats.least_loaded_cpu >= 0) { + target = stats.least_loaded_cpu; + sbc_flag |= SBC_FLAG_IDLE_LEAST_LOADED; + } + } else if (stats.best_cpu >= 0) { + if (stats.best_sibling_cpu >= 0 && + stats.best_cpu != task_cpu(p) && + stats.min_cost == stats.best_sibling_cpu_cost) { + stats.best_cpu = stats.best_sibling_cpu; + sbc_flag |= SBC_FLAG_BEST_SIBLING; + } + sbc_flag |= env.sbc_best_flag; + target = stats.best_cpu; + } else { + if (env.rtg && env.boost_policy == SCHED_BOOST_NONE) { + env.rtg = NULL; + goto retry; + } + + /* + * With boost_policy == SCHED_BOOST_ON_BIG, we reach here with + * backup_list = little cluster, candidate_list = none and + * stats->best_capacity_cpu points the best spare capacity + * CPU among the CPUs in the big cluster. + */ + if (env.boost_policy == SCHED_BOOST_ON_BIG && + stats.best_capacity_cpu >= 0) + sbc_flag |= SBC_FLAG_BOOST_CLUSTER; + else + find_backup_cluster(&env, &stats); + + if (stats.best_capacity_cpu >= 0) { + target = stats.best_capacity_cpu; + sbc_flag |= SBC_FLAG_BEST_CAP_CPU; + } + } + p->last_cpu_selected_ts = sched_ktime_clock(); +out: + sbc_flag |= env.sbc_best_cluster_flag; + rcu_read_unlock(); + trace_sched_task_load(p, sched_boost_policy() && task_sched_boost(p), + env.reason, env.sync, env.need_idle, sbc_flag, target); + return target; +} + +#ifdef CONFIG_CFS_BANDWIDTH + +static inline struct task_group *next_task_group(struct task_group *tg) +{ + tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list); + + return (&tg->list == &task_groups) ? 
NULL : tg; +} + +/* Iterate over all cfs_rq in a cpu */ +#define for_each_cfs_rq(cfs_rq, tg, cpu) \ + for (tg = container_of(&task_groups, struct task_group, list); \ + ((tg = next_task_group(tg)) && (cfs_rq = tg->cfs_rq[cpu]));) + +void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) +{ + struct task_group *tg; + struct cfs_rq *cfs_rq; + + rcu_read_lock(); + + for_each_cfs_rq(cfs_rq, tg, cpu) + reset_hmp_stats(&cfs_rq->hmp_stats, reset_cra); + + rcu_read_unlock(); +} + +static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq); + +static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq, + struct task_struct *p, int change_cra); +static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq, + struct task_struct *p, int change_cra); + +/* Add task's contribution to a cpu' HMP statistics */ +void _inc_hmp_sched_stats_fair(struct rq *rq, + struct task_struct *p, int change_cra) +{ + struct cfs_rq *cfs_rq; + struct sched_entity *se = &p->se; + + /* + * Although below check is not strictly required (as + * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg called + * from inc_cfs_rq_hmp_stats() have similar checks), we gain a bit on + * efficiency by short-circuiting for_each_sched_entity() loop when + * sched_disable_window_stats + */ + if (sched_disable_window_stats) + return; + + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); + inc_cfs_rq_hmp_stats(cfs_rq, p, change_cra); + if (cfs_rq_throttled(cfs_rq)) + break; + } + + /* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */ + if (!se) + inc_rq_hmp_stats(rq, p, change_cra); +} + +/* Remove task's contribution from a cpu' HMP statistics */ +static void +_dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p, int change_cra) +{ + struct cfs_rq *cfs_rq; + struct sched_entity *se = &p->se; + + /* See comment on efficiency in _inc_hmp_sched_stats_fair */ + if (sched_disable_window_stats) + return; + + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); + dec_cfs_rq_hmp_stats(cfs_rq, p, 
change_cra); + if (cfs_rq_throttled(cfs_rq)) + break; + } + + /* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */ + if (!se) + dec_rq_hmp_stats(rq, p, change_cra); +} + +static void inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p) +{ + _inc_hmp_sched_stats_fair(rq, p, 1); +} + +static void dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p) +{ + _dec_hmp_sched_stats_fair(rq, p, 1); +} + +static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p, + u32 new_task_load, u32 new_pred_demand) +{ + struct cfs_rq *cfs_rq; + struct sched_entity *se = &p->se; + s64 task_load_delta = (s64)new_task_load - task_load(p); + s64 pred_demand_delta = PRED_DEMAND_DELTA; + + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); + + fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p, + task_load_delta, + pred_demand_delta); + fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta); + if (cfs_rq_throttled(cfs_rq)) + break; + } + + /* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */ + if (!se) { + fixup_cumulative_runnable_avg(&rq->hmp_stats, p, + task_load_delta, + pred_demand_delta); + fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta); + } +} + +static int task_will_be_throttled(struct task_struct *p); + +#else /* CONFIG_CFS_BANDWIDTH */ + +inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { } + +static void +inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p) +{ + inc_nr_big_task(&rq->hmp_stats, p); + inc_cumulative_runnable_avg(&rq->hmp_stats, p); +} + +static void +dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p) +{ + dec_nr_big_task(&rq->hmp_stats, p); + dec_cumulative_runnable_avg(&rq->hmp_stats, p); +} +static void +fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p, + u32 new_task_load, u32 new_pred_demand) +{ + s64 task_load_delta = (s64)new_task_load - task_load(p); + s64 pred_demand_delta = PRED_DEMAND_DELTA; + + 
fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta, + pred_demand_delta); + fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta); +} + +static inline int task_will_be_throttled(struct task_struct *p) +{ + return 0; +} + +void _inc_hmp_sched_stats_fair(struct rq *rq, + struct task_struct *p, int change_cra) +{ + inc_nr_big_task(&rq->hmp_stats, p); +} + +#endif /* CONFIG_CFS_BANDWIDTH */ + +/* + * Reset balance_interval at all sched_domain levels of given cpu, so that it + * honors kick. + */ +static inline void reset_balance_interval(int cpu) +{ + struct sched_domain *sd; + + if (cpu >= nr_cpu_ids) + return; + + rcu_read_lock(); + for_each_domain(cpu, sd) + sd->balance_interval = 0; + rcu_read_unlock(); +} + +/* + * Check if a task is on the "wrong" cpu (i.e its current cpu is not the ideal + * cpu as per its demand or priority) + * + * Returns reason why task needs to be migrated + */ +static inline int migration_needed(struct task_struct *p, int cpu) +{ + int nice; + struct related_thread_group *grp; + + if (p->state != TASK_RUNNING || p->nr_cpus_allowed == 1) + return 0; + + /* No need to migrate task that is about to be throttled */ + if (task_will_be_throttled(p)) + return 0; + + if (sched_boost_policy() == SCHED_BOOST_ON_BIG && + cpu_capacity(cpu) != max_capacity && task_sched_boost(p)) + return UP_MIGRATION; + + if (sched_cpu_high_irqload(cpu)) + return IRQLOAD_MIGRATION; + + nice = task_nice(p); + rcu_read_lock(); + grp = task_related_thread_group(p); + /* + * Don't assume higher capacity means higher power. If the task + * is running on the power efficient CPU, avoid migrating it + * to a lower capacity cluster. 
+ */ + if (!grp && (nice > SCHED_UPMIGRATE_MIN_NICE || + upmigrate_discouraged(p)) && + cpu_capacity(cpu) > min_capacity && + cpu_max_power_cost(cpu) == max_power_cost) { + rcu_read_unlock(); + return DOWN_MIGRATION; + } + + if (!task_will_fit(p, cpu)) { + rcu_read_unlock(); + return UP_MIGRATION; + } + rcu_read_unlock(); + + return 0; +} + +static inline int +kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu) +{ + unsigned long flags; + int rc = 0; + + /* Invoke active balance to force migrate currently running task */ + raw_spin_lock_irqsave(&rq->lock, flags); + if (!rq->active_balance) { + rq->active_balance = 1; + rq->push_cpu = new_cpu; + get_task_struct(p); + rq->push_task = p; + rc = 1; + } + raw_spin_unlock_irqrestore(&rq->lock, flags); + + return rc; +} + +static DEFINE_RAW_SPINLOCK(migration_lock); + +static bool do_migration(int reason, int new_cpu, int cpu) +{ + if ((reason == UP_MIGRATION || reason == DOWN_MIGRATION) + && same_cluster(new_cpu, cpu)) + return false; + + /* Inter cluster high irqload migrations are OK */ + return new_cpu != cpu; +} + +/* + * Check if currently running task should be migrated to a better cpu. + * + * Todo: Effect this via changes to nohz_balancer_kick() and load balance? 
+ */ +void check_for_migration(struct rq *rq, struct task_struct *p) +{ + int cpu = cpu_of(rq), new_cpu; + int active_balance = 0, reason; + + reason = migration_needed(p, cpu); + if (!reason) + return; + + raw_spin_lock(&migration_lock); + new_cpu = select_best_cpu(p, cpu, reason, 0); + + if (do_migration(reason, new_cpu, cpu)) { + active_balance = kick_active_balance(rq, p, new_cpu); + if (active_balance) + mark_reserved(new_cpu); + } + + raw_spin_unlock(&migration_lock); + + if (active_balance) + stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq, + &rq->active_balance_work); +} + +#ifdef CONFIG_CFS_BANDWIDTH + +static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq) +{ + cfs_rq->hmp_stats.nr_big_tasks = 0; + cfs_rq->hmp_stats.cumulative_runnable_avg = 0; + cfs_rq->hmp_stats.pred_demands_sum = 0; +} + +static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq, + struct task_struct *p, int change_cra) +{ + inc_nr_big_task(&cfs_rq->hmp_stats, p); + if (change_cra) + inc_cumulative_runnable_avg(&cfs_rq->hmp_stats, p); +} + +static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq, + struct task_struct *p, int change_cra) +{ + dec_nr_big_task(&cfs_rq->hmp_stats, p); + if (change_cra) + dec_cumulative_runnable_avg(&cfs_rq->hmp_stats, p); +} + +static void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats, + struct cfs_rq *cfs_rq) +{ + stats->nr_big_tasks += cfs_rq->hmp_stats.nr_big_tasks; + stats->cumulative_runnable_avg += + cfs_rq->hmp_stats.cumulative_runnable_avg; + stats->pred_demands_sum += cfs_rq->hmp_stats.pred_demands_sum; +} + +static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats, + struct cfs_rq *cfs_rq) +{ + stats->nr_big_tasks -= cfs_rq->hmp_stats.nr_big_tasks; + stats->cumulative_runnable_avg -= + cfs_rq->hmp_stats.cumulative_runnable_avg; + stats->pred_demands_sum -= cfs_rq->hmp_stats.pred_demands_sum; + + BUG_ON(stats->nr_big_tasks < 0 || + (s64)stats->cumulative_runnable_avg < 0); + 
BUG_ON((s64)stats->pred_demands_sum < 0); +} + +#else /* CONFIG_CFS_BANDWIDTH */ + +static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq, + struct task_struct *p, int change_cra) { } + +static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq, + struct task_struct *p, int change_cra) { } + +#endif /* CONFIG_CFS_BANDWIDTH */ + +#else /* CONFIG_SCHED_HMP */ + +static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq) { } + +static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq, + struct task_struct *p, int change_cra) { } + +static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq, + struct task_struct *p, int change_cra) { } + +#define dec_throttled_cfs_rq_hmp_stats(...) +#define inc_throttled_cfs_rq_hmp_stats(...) + +#endif /* CONFIG_SCHED_HMP */ + +#if (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) != 10 || SCHED_CAPACITY_SHIFT != 10 +#error "load tracking assumes 2^10 as unit" +#endif + +#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) + +/* + * We can represent the historical contribution to runnable average as the + * coefficients of a geometric series. To do this we sub-divide our runnable + * history into segments of approximately 1ms (1024us); label the segment that + * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g. + * + * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ... + * p0 p1 p2 + * (now) (~1ms ago) (~2ms ago) + * + * Let u_i denote the fraction of p_i that the entity was runnable. + * + * We then designate the fractions u_i as our co-efficients, yielding the + * following representation of historical load: + * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ... + * + * We choose y based on the with of a reasonably scheduling period, fixing: + * y^32 = 0.5 + * + * This means that the contribution to load ~32ms ago (u_32) will be weighted + * approximately half as much as the contribution to load within the last ms + * (u_0). 
+ * + * When a period "rolls over" and we have new u_0`, multiplying the previous + * sum again by y is sufficient to update: + * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... ) + * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}] + */ +static __always_inline int +__update_load_avg(u64 now, int cpu, struct sched_avg *sa, + unsigned long weight, int running, struct cfs_rq *cfs_rq) +{ + u64 delta, scaled_delta, periods; + u32 contrib; + unsigned int delta_w, scaled_delta_w, decayed = 0; + unsigned long scale_freq, scale_cpu; + + delta = now - sa->last_update_time; + /* + * This should only happen when time goes backwards, which it + * unfortunately does during sched clock init when we swap over to TSC. + */ + if ((s64)delta < 0) { + sa->last_update_time = now; + return 0; + } + + /* + * Use 1024ns as the unit of measurement since it's a reasonable + * approximation of 1us and fast to compute. + */ + delta >>= 10; + if (!delta) + return 0; + sa->last_update_time = now; + + scale_freq = arch_scale_freq_capacity(NULL, cpu); + scale_cpu = arch_scale_cpu_capacity(NULL, cpu); + trace_sched_contrib_scale_f(cpu, scale_freq, scale_cpu); + + /* delta_w is the amount already accumulated against our next period */ + delta_w = sa->period_contrib; + if (delta + delta_w >= 1024) { + decayed = 1; + + /* how much left for next period will start over, we don't know yet */ + sa->period_contrib = 0; + + /* + * Now that we know we're crossing a period boundary, figure + * out how much from delta we need to complete the current + * period and accrue it. 
+ */ + delta_w = 1024 - delta_w; + scaled_delta_w = cap_scale(delta_w, scale_freq); + if (weight) { + sa->load_sum += weight * scaled_delta_w; + if (cfs_rq) { + cfs_rq->runnable_load_sum += + weight * scaled_delta_w; + } + } + if (running) + sa->util_sum += scaled_delta_w * scale_cpu; + + delta -= delta_w; + + /* Figure out how many additional periods this update spans */ + periods = delta / 1024; + delta %= 1024; + + sa->load_sum = decay_load(sa->load_sum, periods + 1); + if (cfs_rq) { + cfs_rq->runnable_load_sum = + decay_load(cfs_rq->runnable_load_sum, periods + 1); + } + sa->util_sum = decay_load((u64)(sa->util_sum), periods + 1); + + /* Efficiently calculate \sum (1..n_period) 1024*y^i */ + contrib = __compute_runnable_contrib(periods); + contrib = cap_scale(contrib, scale_freq); + if (weight) { + sa->load_sum += weight * contrib; + if (cfs_rq) + cfs_rq->runnable_load_sum += weight * contrib; + } + if (running) + sa->util_sum += contrib * scale_cpu; + } + + /* Remainder of delta accrued against u_0` */ + scaled_delta = cap_scale(delta, scale_freq); + if (weight) { + sa->load_sum += weight * scaled_delta; + if (cfs_rq) + cfs_rq->runnable_load_sum += weight * scaled_delta; + } + + if (running) + sa->util_sum += scaled_delta * scale_cpu; + + sa->period_contrib += delta; + + if (decayed) { + sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX); + if (cfs_rq) { + cfs_rq->runnable_load_avg = + div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX); + } + sa->util_avg = sa->util_sum / LOAD_AVG_MAX; + } + + return decayed; +} + +/* + * Signed add and clamp on underflow. + * + * Explicitly do a load-store to ensure the intermediate value never hits + * memory. This allows lockless observations without ever seeing the negative + * values. 
+ */ +#define add_positive(_ptr, _val) do { \ + typeof(_ptr) ptr = (_ptr); \ + typeof(_val) val = (_val); \ + typeof(*ptr) res, var = READ_ONCE(*ptr); \ + \ + res = var + val; \ + \ + if (val < 0 && res > var) \ + res = 0; \ + \ + WRITE_ONCE(*ptr, res); \ +} while (0) + +#ifdef CONFIG_FAIR_GROUP_SCHED +/** + * update_tg_load_avg - update the tg's load avg + * @cfs_rq: the cfs_rq whose avg changed + * @force: update regardless of how small the difference + * + * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load. + * However, because tg->load_avg is a global value there are performance + * considerations. + * + * In order to avoid having to look at the other cfs_rq's, we use a + * differential update where we store the last value we propagated. This in + * turn allows skipping updates if the differential is 'small'. + * + * Updating tg's load_avg is necessary before update_cfs_share() (which is + * done) and effective_load() (which is not done because it is too costly). + */ +static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) +{ + long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; + + /* + * No need to update load_avg for root_task_group as it is not used. + */ + if (cfs_rq->tg == &root_task_group) + return; + + if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { + atomic_long_add(delta, &cfs_rq->tg->load_avg); + cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; + } +} + +/* + * Called within set_task_rq() right before setting a task's cpu. The + * caller only guarantees p->pi_lock is held; no other assumptions, + * including the state of rq->lock, should be made. + */ +void set_task_rq_fair(struct sched_entity *se, + struct cfs_rq *prev, struct cfs_rq *next) +{ + if (!sched_feat(ATTACH_AGE_LOAD)) + return; + + /* + * We are supposed to update the task to "current" time, then its up to + * date and ready to go to new CPU/cfs_rq. 
But we have difficulty in + * getting what current time is, so simply throw away the out-of-date + * time. This will result in the wakee task is less decayed, but giving + * the wakee more load sounds not bad. + */ + if (se->avg.last_update_time && prev) { + u64 p_last_update_time; + u64 n_last_update_time; + +#ifndef CONFIG_64BIT + u64 p_last_update_time_copy; + u64 n_last_update_time_copy; + + do { + p_last_update_time_copy = prev->load_last_update_time_copy; + n_last_update_time_copy = next->load_last_update_time_copy; smp_rmb(); @@ -3120,36 +4216,6 @@ static inline int propagate_entity_load_avg(struct sched_entity *se) return 1; } -/* - * Check if we need to update the load and the utilization of a blocked - * group_entity: - */ -static inline bool skip_blocked_update(struct sched_entity *se) -{ - struct cfs_rq *gcfs_rq = group_cfs_rq(se); - - /* - * If sched_entity still have not zero load or utilization, we have to - * decay it: - */ - if (se->avg.load_avg || se->avg.util_avg) - return false; - - /* - * If there is a pending propagation, we have to update the load and - * the utilization of the sched_entity: - */ - if (gcfs_rq->propagate_avg) - return false; - - /* - * Otherwise, the load and the utilization of the sched_entity is - * already zero and there is no pending propagation, so it will be a - * waste of time to try to decay it: - */ - return true; -} - #else /* CONFIG_FAIR_GROUP_SCHED */ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {} @@ -3267,7 +4333,6 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) */ #define UPDATE_TG 0x1 #define SKIP_AGE_LOAD 0x2 -#define SKIP_CPUFREQ 0x4 /* Update task and its cfs_rq load average */ static inline void update_load_avg(struct sched_entity *se, int flags) @@ -3288,7 +4353,7 @@ static inline void update_load_avg(struct sched_entity *se, int flags) cfs_rq->curr == se, NULL); } - decayed = update_cfs_rq_load_avg(now, cfs_rq, !(flags & SKIP_CPUFREQ)); + decayed = 
update_cfs_rq_load_avg(now, cfs_rq, true); decayed |= propagate_entity_load_avg(se); if (decayed && (flags & UPDATE_TG)) @@ -3464,7 +4529,6 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) #define UPDATE_TG 0x0 #define SKIP_AGE_LOAD 0x0 -#define SKIP_CPUFREQ 0x0 static inline void update_load_avg(struct sched_entity *se, int not_used1){} static inline void @@ -3483,6 +4547,12 @@ static inline int idle_balance(struct rq *rq) return 0; } +static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq, + struct task_struct *p, int change_cra) { } + +static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq, + struct task_struct *p, int change_cra) { } + #endif /* CONFIG_SMP */ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) @@ -3681,8 +4751,6 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); static void dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { - int update_flags; - /* * Update run-time statistics of the 'current'. */ @@ -3696,12 +4764,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) * - For group entity, update its weight to reflect the new share * of its group cfs_rq. */ - update_flags = UPDATE_TG; - - if (flags & DEQUEUE_IDLE) - update_flags |= SKIP_CPUFREQ; - - update_load_avg(se, update_flags); + update_load_avg(se, UPDATE_TG); dequeue_entity_load_avg(cfs_rq, se); update_stats_dequeue(cfs_rq, se); @@ -4116,6 +5179,35 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) return cfs_bandwidth_used() && cfs_rq->throttled; } +#ifdef CONFIG_SCHED_HMP +/* + * Check if task is part of a hierarchy where some cfs_rq does not have any + * runtime left. 
+ * + * We can't rely on throttled_hierarchy() to do this test, as + * cfs_rq->throttle_count will not be updated yet when this function is called + * from scheduler_tick() + */ +static int task_will_be_throttled(struct task_struct *p) +{ + struct sched_entity *se = &p->se; + struct cfs_rq *cfs_rq; + + if (!cfs_bandwidth_used()) + return 0; + + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); + if (!cfs_rq->runtime_enabled) + continue; + if (cfs_rq->runtime_remaining <= 0) + return 1; + } + + return 0; +} +#endif + /* check whether cfs_rq, or any parent, is throttled */ static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) { @@ -4195,6 +5287,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) if (dequeue) dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); qcfs_rq->h_nr_running -= task_delta; + dec_throttled_cfs_rq_hmp_stats(&qcfs_rq->hmp_stats, cfs_rq); if (qcfs_rq->load.weight) dequeue = 0; @@ -4202,6 +5295,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) if (!se) { sub_nr_running(rq, task_delta); + dec_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, cfs_rq); } cfs_rq->throttled = 1; @@ -4227,6 +5321,14 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) start_cfs_bandwidth(cfs_b); raw_spin_unlock(&cfs_b->lock); + + /* Log effect on hmp stats after throttling */ + if (trace_sched_cpu_load_cgroup_enabled()) { + trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)), + sched_irqload(cpu_of(rq)), + power_cost(cpu_of(rq), 0), + cpu_temp(cpu_of(rq))); + } } void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) @@ -4236,6 +5338,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) struct sched_entity *se; int enqueue = 1; long task_delta; + struct cfs_rq *tcfs_rq __maybe_unused = cfs_rq; se = cfs_rq->tg->se[cpu_of(rq)]; @@ -4263,6 +5366,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) if (enqueue) enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); cfs_rq->h_nr_running += task_delta; + inc_throttled_cfs_rq_hmp_stats(&cfs_rq->hmp_stats, tcfs_rq); if (cfs_rq_throttled(cfs_rq)) 
break; @@ -4270,11 +5374,20 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) if (!se) { add_nr_running(rq, task_delta); + inc_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, tcfs_rq); } /* determine whether we need to wake up potentially idle cpu */ if (rq->curr == rq->idle && rq->cfs.nr_running) resched_curr(rq); + + /* Log effect on hmp stats after un-throttling */ + if (trace_sched_cpu_load_cgroup_enabled()) { + trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)), + sched_irqload(cpu_of(rq)), + power_cost(cpu_of(rq), 0), + cpu_temp(cpu_of(rq))); + } } static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, @@ -4407,7 +5520,7 @@ static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) { struct hrtimer *refresh_timer = &cfs_b->period_timer; - s64 remaining; + u64 remaining; /* if the call-back is running a quota refresh is already occurring */ if (hrtimer_callback_running(refresh_timer)) @@ -4415,7 +5528,7 @@ static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) /* is a quota refresh about to occur? */ remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); - if (remaining < (s64)min_expire) + if (remaining < min_expire) return 1; return 0; @@ -4660,6 +5773,7 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) { cfs_rq->runtime_enabled = 0; INIT_LIST_HEAD(&cfs_rq->throttled_list); + init_cfs_rq_hmp_stats(cfs_rq); } void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) @@ -4833,25 +5947,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) struct sched_entity *se = &p->se; #ifdef CONFIG_SMP int task_new = flags & ENQUEUE_WAKEUP_NEW; - - /* - * Update SchedTune accounting. - * - * We do it before updating the CPU capacity to ensure the - * boost value of the current task is accounted for in the - * selection of the OPP. 
- * - * We do it also in the case where we enqueue a throttled task; - * we could argue that a throttled task should not boost a CPU, - * however: - * a) properly implementing CPU boosting considering throttled - * tasks will increase a lot the complexity of the solution - * b) it's not easy to quantify the benefits introduced by - * such a more complex solution. - * Thus, for the time being we go for the simple solution and boost - * also for throttled RQs. - */ - schedtune_enqueue_task(p, cpu_of(rq)); #endif /* @@ -4877,7 +5972,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) if (cfs_rq_throttled(cfs_rq)) break; cfs_rq->h_nr_running++; - walt_inc_cfs_cumulative_runnable_avg(cfs_rq, p); + inc_cfs_rq_hmp_stats(cfs_rq, p, 1); flags = ENQUEUE_WAKEUP; } @@ -4885,7 +5980,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); cfs_rq->h_nr_running++; - walt_inc_cfs_cumulative_runnable_avg(cfs_rq, p); + inc_cfs_rq_hmp_stats(cfs_rq, p, 1); if (cfs_rq_throttled(cfs_rq)) break; @@ -4896,11 +5991,35 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) if (!se) { add_nr_running(rq, 1); + + if (unlikely(p->nr_cpus_allowed == 1)) + rq->nr_pinned_tasks++; + + inc_rq_hmp_stats(rq, p, 1); } #ifdef CONFIG_SMP - if (!se) { - walt_inc_cumulative_runnable_avg(rq, p); + + /* + * Update SchedTune accounting. + * + * We do it before updating the CPU capacity to ensure the + * boost value of the current task is accounted for in the + * selection of the OPP. + * + * We do it also in the case where we enqueue a throttled task; + * we could argue that a throttled task should not boost a CPU, + * however: + * a) properly implementing CPU boosting considering throttled + * tasks will increase a lot the complexity of the solution + * b) it's not easy to quantify the benefits introduced by + * such a more complex solution. 
+ * Thus, for the time being we go for the simple solution and boost + * also for throttled RQs. + */ + schedtune_enqueue_task(p, cpu_of(rq)); + + if (energy_aware() && !se) { if (!task_new && !rq->rd->overutilized && cpu_overutilized(rq->cpu)) { rq->rd->overutilized = true; @@ -4925,20 +6044,6 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) struct sched_entity *se = &p->se; int task_sleep = flags & DEQUEUE_SLEEP; - if (task_sleep && rq->nr_running == 1) - flags |= DEQUEUE_IDLE; - -#ifdef CONFIG_SMP - /* - * Update SchedTune accounting - * - * We do it before updating the CPU capacity to ensure the - * boost value of the current task is accounted for in the - * selection of the OPP. - */ - schedtune_dequeue_task(p, cpu_of(rq)); -#endif - for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); dequeue_entity(cfs_rq, se, flags); @@ -4952,7 +6057,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) if (cfs_rq_throttled(cfs_rq)) break; cfs_rq->h_nr_running--; - walt_dec_cfs_cumulative_runnable_avg(cfs_rq, p); + dec_cfs_rq_hmp_stats(cfs_rq, p, 1); /* Don't dequeue parent if it has other entities besides us */ if (cfs_rq->load.weight) { @@ -4970,31 +6075,37 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) } for_each_sched_entity(se) { - int update_flags; - cfs_rq = cfs_rq_of(se); cfs_rq->h_nr_running--; - walt_dec_cfs_cumulative_runnable_avg(cfs_rq, p); + dec_cfs_rq_hmp_stats(cfs_rq, p, 1); if (cfs_rq_throttled(cfs_rq)) break; - update_flags = UPDATE_TG; - - if (flags & DEQUEUE_IDLE) - update_flags |= SKIP_CPUFREQ; - - update_load_avg(se, update_flags); + update_load_avg(se, UPDATE_TG); update_cfs_shares(se); } if (!se) { sub_nr_running(rq, 1); + + if (unlikely(p->nr_cpus_allowed == 1)) + rq->nr_pinned_tasks--; + + dec_rq_hmp_stats(rq, p, 1); } #ifdef CONFIG_SMP - if (!se) - walt_dec_cumulative_runnable_avg(rq, p); + + /* + * Update SchedTune accounting + * + * We do it before 
updating the CPU capacity to ensure the + * boost value of the current task is accounted for in the + * selection of the OPP. + */ + schedtune_dequeue_task(p, cpu_of(rq)); + #endif /* CONFIG_SMP */ hrtick_update(rq); @@ -5407,25 +6518,6 @@ unsigned long capacity_curr_of(int cpu) >> SCHED_CAPACITY_SHIFT; } -/* - * Returns the current capacity of cpu after applying both - * cpu and min freq scaling. - */ -unsigned long capacity_min_of(int cpu) -{ - if (!sched_feat(MIN_CAPACITY_CAPPING)) - return 0; - return arch_scale_cpu_capacity(NULL, cpu) * - arch_scale_min_freq_capacity(NULL, cpu) - >> SCHED_CAPACITY_SHIFT; -} - - -static inline bool energy_aware(void) -{ - return sched_feat(ENERGY_AWARE); -} - struct energy_env { struct sched_group *sg_top; struct sched_group *sg_cap; @@ -5493,15 +6585,6 @@ static unsigned long group_max_util(struct energy_env *eenv) util += eenv->util_delta; max_util = max(max_util, util); - - /* - * Take into account any minimum frequency imposed - * elsewhere which limits the energy states available - * If the MIN_CAPACITY_CAPPING feature is not enabled - * capacity_min_of will return 0 (not capped). 
- */ - max_util = max(max_util, capacity_min_of(cpu)); - } return max_util; @@ -5553,6 +6636,7 @@ static int find_new_capacity(struct energy_env *eenv, for (idx = 0; idx < sge->nr_cap_states; idx++) { if (sge->cap_states[idx].cap >= util) { + /* Keep track of SG's capacity index */ eenv->cap_idx = idx; break; } @@ -6034,12 +7118,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, static inline unsigned long task_util(struct task_struct *p) { -#ifdef CONFIG_SCHED_WALT - if (!walt_disabled && sysctl_sched_use_walt_task_util) { - unsigned long demand = p->ravg.demand; - return (demand << 10) / walt_ravg_window; - } -#endif return p->se.avg.util_avg; } @@ -6424,6 +7502,10 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) } } + if (!(current->flags & PF_WAKE_UP_IDLE) && + !(p->flags & PF_WAKE_UP_IDLE)) + return target; + /* * Otherwise, iterate the domains and find an elegible idle cpu. */ @@ -6534,7 +7616,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, unsigned long min_wake_util = ULONG_MAX; unsigned long target_max_spare_cap = 0; unsigned long best_active_util = ULONG_MAX; - unsigned long target_idle_max_spare_cap = 0; int best_idle_cstate = INT_MAX; struct sched_domain *sd; struct sched_group *sg; @@ -6570,7 +7651,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) { unsigned long capacity_curr = capacity_curr_of(i); unsigned long capacity_orig = capacity_orig_of(i); - unsigned long wake_util, new_util, min_capped_util; + unsigned long wake_util, new_util; if (!cpu_online(i)) continue; @@ -6592,16 +7673,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, * than the one required to boost the task. */ new_util = max(min_util, new_util); - - /* - * Include minimum capacity constraint: - * new_util contains the required utilization including - * boost. 
min_capped_util also takes into account a - * minimum capacity cap imposed on the CPU by external - * actors. - */ - min_capped_util = max(new_util, capacity_min_of(i)); - if (new_util > capacity_orig) continue; @@ -6724,12 +7795,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, /* Select idle CPU with lower cap_orig */ if (capacity_orig > best_idle_min_cap_orig) continue; - /* Favor CPUs that won't end up running at a - * high OPP. - */ - if ((capacity_orig - min_capped_util) < - target_idle_max_spare_cap) - continue; /* * Skip CPUs in deeper idle state, but only @@ -6743,8 +7808,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, /* Keep track of best idle CPU */ best_idle_min_cap_orig = capacity_orig; - target_idle_max_spare_cap = capacity_orig - - min_capped_util; best_idle_cstate = idle_idx; best_idle_cpu = i; continue; @@ -6775,11 +7838,10 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu, continue; /* Favor CPUs with maximum spare capacity */ - if ((capacity_orig - min_capped_util) < - target_max_spare_cap) + if ((capacity_orig - new_util) < target_max_spare_cap) continue; - target_max_spare_cap = capacity_orig - min_capped_util; + target_max_spare_cap = capacity_orig - new_util; target_capacity = capacity_orig; target_cpu = i; } @@ -6968,6 +8030,10 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f int want_affine = 0; int sync = wake_flags & WF_SYNC; +#ifdef CONFIG_SCHED_HMP + return select_best_cpu(p, prev_cpu, 0, sync); +#endif + if (sd_flag & SD_BALANCE_WAKE) { record_wakee(p); want_affine = !wake_wide(p, sibling_count_hint) && @@ -7554,6 +8620,10 @@ enum group_type { #define LBF_NEED_BREAK 0x02 #define LBF_DST_PINNED 0x04 #define LBF_SOME_PINNED 0x08 +#define LBF_BIG_TASK_ACTIVE_BALANCE 0x80 +#define LBF_IGNORE_BIG_TASKS 0x100 +#define LBF_IGNORE_PREFERRED_CLUSTER_TASKS 0x200 +#define LBF_MOVED_RELATED_THREAD_GROUP_TASK 0x400 struct 
lb_env { struct sched_domain *sd; @@ -7571,6 +8641,8 @@ struct lb_env { unsigned int src_grp_nr_running; /* The set of CPUs under consideration for load-balancing */ struct cpumask *cpus; + unsigned int busiest_grp_capacity; + unsigned int busiest_nr_running; unsigned int flags; @@ -7581,6 +8653,7 @@ struct lb_env { enum fbq_type fbq_type; enum group_type busiest_group_type; struct list_head tasks; + enum sched_boost_policy boost_policy; }; /* @@ -7678,6 +8751,7 @@ static int can_migrate_task(struct task_struct *p, struct lb_env *env) { int tsk_cache_hot; + int twf, group_cpus; lockdep_assert_held(&env->src_rq->lock); @@ -7724,6 +8798,39 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) /* Record that we found atleast one task that could run on dst_cpu */ env->flags &= ~LBF_ALL_PINNED; + if (cpu_capacity(env->dst_cpu) > cpu_capacity(env->src_cpu)) { + if (nr_big_tasks(env->src_rq) && !is_big_task(p)) + return 0; + + if (env->boost_policy == SCHED_BOOST_ON_BIG && + !task_sched_boost(p)) + return 0; + } + + twf = task_will_fit(p, env->dst_cpu); + + /* + * Attempt to not pull tasks that don't fit. We may get lucky and find + * one that actually fits. + */ + if (env->flags & LBF_IGNORE_BIG_TASKS && !twf) + return 0; + + if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS && + !preferred_cluster(rq_cluster(cpu_rq(env->dst_cpu)), p)) + return 0; + + /* + * Group imbalance can sometimes cause work to be pulled across groups + * even though the group could have managed the imbalance on its own. + * Prevent inter-cluster migrations for big tasks when the number of + * tasks is lower than the capacity of the group. 
+ */ + group_cpus = DIV_ROUND_UP(env->busiest_grp_capacity, + SCHED_CAPACITY_SCALE); + if (!twf && env->busiest_nr_running <= group_cpus) + return 0; + if (task_running(env->src_rq, p)) { schedstat_inc(p, se.statistics.nr_failed_migrations_running); return 0; @@ -7764,6 +8871,8 @@ static void detach_task(struct task_struct *p, struct lb_env *env) deactivate_task(env->src_rq, p, 0); double_lock_balance(env->src_rq, env->dst_rq); set_task_cpu(p, env->dst_cpu); + if (task_in_related_thread_group(p)) + env->flags |= LBF_MOVED_RELATED_THREAD_GROUP_TASK; double_unlock_balance(env->src_rq, env->dst_rq); } @@ -7812,12 +8921,20 @@ static int detach_tasks(struct lb_env *env) struct task_struct *p; unsigned long load; int detached = 0; + int orig_loop = env->loop; lockdep_assert_held(&env->src_rq->lock); if (env->imbalance <= 0) return 0; + if (!same_cluster(env->dst_cpu, env->src_cpu)) + env->flags |= LBF_IGNORE_PREFERRED_CLUSTER_TASKS; + + if (cpu_capacity(env->dst_cpu) < cpu_capacity(env->src_cpu)) + env->flags |= LBF_IGNORE_BIG_TASKS; + +redo: while (!list_empty(tasks)) { /* * We don't want to steal all, otherwise we may be treated likewise, @@ -7826,6 +8943,14 @@ static int detach_tasks(struct lb_env *env) if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) break; + /* + * Another CPU can place tasks, since we do not hold dst_rq lock + * while doing balancing. If newly idle CPU already got something, + * give up to reduce a latency. 
+ */ + if (env->idle == CPU_NEWLY_IDLE && env->dst_rq->nr_running > 0) + break; + p = list_first_entry(tasks, struct task_struct, se.group_node); env->loop++; @@ -7856,8 +8981,9 @@ static int detach_tasks(struct lb_env *env) if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) goto next; - if ((load / 2) > env->imbalance) - goto next; + if (env->idle != CPU_NEWLY_IDLE) + if ((load / 2) > env->imbalance) + goto next; detach_task(p, env); list_add(&p->se.group_node, &env->tasks); @@ -7887,6 +9013,15 @@ next: list_move_tail(&p->se.group_node, tasks); } + if (env->flags & (LBF_IGNORE_BIG_TASKS | + LBF_IGNORE_PREFERRED_CLUSTER_TASKS) && !detached) { + tasks = &env->src_rq->cfs_tasks; + env->flags &= ~(LBF_IGNORE_BIG_TASKS | + LBF_IGNORE_PREFERRED_CLUSTER_TASKS); + env->loop = orig_loop; + goto redo; + } + /* * Right now, this is one of only two places we collect this stat * so we can safely collect detach_one_task() stats here rather @@ -7957,8 +9092,6 @@ static void update_blocked_averages(int cpu) * list_add_leaf_cfs_rq() for details. 
*/ for_each_leaf_cfs_rq(rq, cfs_rq) { - struct sched_entity *se; - /* throttled entities do not contribute to load */ if (throttled_hierarchy(cfs_rq)) continue; @@ -7967,10 +9100,9 @@ static void update_blocked_averages(int cpu) true)) update_tg_load_avg(cfs_rq, 0); - /* Propagate pending load changes to the parent, if any: */ - se = cfs_rq->tg->se[cpu]; - if (se && !skip_blocked_update(se)) - update_load_avg(se, 0); + /* Propagate pending load changes to the parent */ + if (cfs_rq->tg->se[cpu]) + update_load_avg(cfs_rq->tg->se[cpu], 0); } raw_spin_unlock_irqrestore(&rq->lock, flags); } @@ -8053,6 +9185,10 @@ struct sg_lb_stats { unsigned long group_capacity; unsigned long group_util; /* Total utilization of the group */ unsigned int sum_nr_running; /* Nr tasks running in the group */ +#ifdef CONFIG_SCHED_HMP + unsigned long sum_nr_big_tasks; + u64 group_cpu_load; /* Scaled load of all CPUs of the group */ +#endif unsigned int idle_cpus; unsigned int group_weight; enum group_type group_type; @@ -8079,27 +9215,81 @@ struct sd_lb_stats { struct sg_lb_stats local_stat; /* Statistics of the local group */ }; -static inline void init_sd_lb_stats(struct sd_lb_stats *sds) +static inline void init_sd_lb_stats(struct sd_lb_stats *sds) +{ + /* + * Skimp on the clearing to avoid duplicate work. We can avoid clearing + * local_stat because update_sg_lb_stats() does a full clear/assignment. + * We must however clear busiest_stat::avg_load because + * update_sd_pick_busiest() reads this before assignment. 
+ */ + *sds = (struct sd_lb_stats){ + .busiest = NULL, + .local = NULL, + .total_load = 0UL, + .total_capacity = 0UL, + .busiest_stat = { + .avg_load = 0UL, + .sum_nr_running = 0, + .group_type = group_other, +#ifdef CONFIG_SCHED_HMP + .sum_nr_big_tasks = 0UL, + .group_cpu_load = 0ULL, +#endif + }, + }; +} + +#ifdef CONFIG_SCHED_HMP + +static int +bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds) +{ + int local_cpu, busiest_cpu; + int local_capacity, busiest_capacity; + int local_pwr_cost, busiest_pwr_cost; + int nr_cpus; + int boost = sched_boost(); + + if (!sysctl_sched_restrict_cluster_spill || + boost == FULL_THROTTLE_BOOST || boost == CONSERVATIVE_BOOST) + return 0; + + local_cpu = group_first_cpu(sds->local); + busiest_cpu = group_first_cpu(sds->busiest); + + local_capacity = cpu_max_possible_capacity(local_cpu); + busiest_capacity = cpu_max_possible_capacity(busiest_cpu); + + local_pwr_cost = cpu_max_power_cost(local_cpu); + busiest_pwr_cost = cpu_max_power_cost(busiest_cpu); + + if (local_pwr_cost <= busiest_pwr_cost) + return 0; + + if (local_capacity > busiest_capacity && + sds->busiest_stat.sum_nr_big_tasks) + return 0; + + nr_cpus = cpumask_weight(sched_group_cpus(sds->busiest)); + if ((sds->busiest_stat.group_cpu_load < nr_cpus * sched_spill_load) && + (sds->busiest_stat.sum_nr_running < + nr_cpus * sysctl_sched_spill_nr_run)) + return 1; + + return 0; +} + +#else /* CONFIG_SCHED_HMP */ + +static inline int +bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds) { - /* - * Skimp on the clearing to avoid duplicate work. We can avoid clearing - * local_stat because update_sg_lb_stats() does a full clear/assignment. - * We must however clear busiest_stat::avg_load because - * update_sd_pick_busiest() reads this before assignment. 
- */ - *sds = (struct sd_lb_stats){ - .busiest = NULL, - .local = NULL, - .total_load = 0UL, - .total_capacity = 0UL, - .busiest_stat = { - .avg_load = 0UL, - .sum_nr_running = 0, - .group_type = group_other, - }, - }; + return 0; } +#endif /* CONFIG_SCHED_HMP */ + /** * get_sd_load_idx - Obtain the load index for a given sched domain. * @sd: The sched_domain whose load_idx is to be obtained. @@ -8181,9 +9371,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu) cpu_rq(cpu)->cpu_capacity_orig = capacity; - capacity *= arch_scale_max_freq_capacity(sd, cpu); - capacity >>= SCHED_CAPACITY_SHIFT; - mcc = &cpu_rq(cpu)->rd->max_cpu_capacity; raw_spin_lock_irqsave(&mcc->lock, flags); @@ -8246,6 +9433,8 @@ void update_group_capacity(struct sched_domain *sd, int cpu) struct sched_group_capacity *sgc; struct rq *rq = cpu_rq(cpu); + if (cpumask_test_cpu(cpu, cpu_isolated_mask)) + continue; /* * build_sched_domains() -> init_sched_groups_capacity() * gets here before we've attached the domains to the @@ -8277,9 +9466,14 @@ void update_group_capacity(struct sched_domain *sd, int cpu) do { struct sched_group_capacity *sgc = group->sgc; - capacity += sgc->capacity; - max_capacity = max(sgc->max_capacity, max_capacity); - min_capacity = min(sgc->min_capacity, min_capacity); + cpumask_t *cpus = sched_group_cpus(group); + + /* Revisit this later. 
This won't work for MT domain */ + if (!cpu_isolated(cpumask_first(cpus))) { + capacity += sgc->capacity; + max_capacity = max(sgc->max_capacity, max_capacity); + min_capacity = min(sgc->min_capacity, min_capacity); + } group = group->next; } while (group != child->groups); } @@ -8395,7 +9589,7 @@ group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref) static inline enum group_type group_classify(struct sched_group *group, - struct sg_lb_stats *sgs) + struct sg_lb_stats *sgs, struct lb_env *env) { if (sgs->group_no_capacity) return group_overloaded; @@ -8464,6 +9658,15 @@ static inline void update_sg_lb_stats(struct lb_env *env, for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { struct rq *rq = cpu_rq(i); + if (trace_sched_cpu_load_lb_enabled()) { + trace_sched_cpu_load_lb(cpu_rq(i), idle_cpu(i), + sched_irqload(i), + power_cost(i, 0), + cpu_temp(i)); + } + if (cpu_isolated(i)) + continue; + /* if we are entering idle and there are CPUs with * their tick stopped, do an update for them */ @@ -8484,6 +9687,11 @@ static inline void update_sg_lb_stats(struct lb_env *env, if (nr_running > 1) *overload = true; +#ifdef CONFIG_SCHED_HMP + sgs->sum_nr_big_tasks += rq->hmp_stats.nr_big_tasks; + sgs->group_cpu_load += cpu_load(i); +#endif + #ifdef CONFIG_NUMA_BALANCING sgs->nr_numa_running += rq->nr_numa_running; sgs->nr_preferred_running += rq->nr_preferred_running; @@ -8495,25 +9703,62 @@ static inline void update_sg_lb_stats(struct lb_env *env, if (!nr_running && idle_cpu(i)) sgs->idle_cpus++; - if (cpu_overutilized(i)) { + if (energy_aware() && cpu_overutilized(i)) { *overutilized = true; if (!sgs->group_misfit_task && rq->misfit_task) sgs->group_misfit_task = capacity_of(i); } } - /* Adjust by relative CPU capacity of the group */ - sgs->group_capacity = group->sgc->capacity; - sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity; + /* Isolated CPU has no weight */ + if (!group->group_weight) { + sgs->group_capacity = 0; 
+ sgs->avg_load = 0; + sgs->group_no_capacity = 1; + sgs->group_type = group_other; + sgs->group_weight = group->group_weight; + } else { + /* Adjust by relative CPU capacity of the group */ + sgs->group_capacity = group->sgc->capacity; + sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / + sgs->group_capacity; + + sgs->group_weight = group->group_weight; + + sgs->group_no_capacity = group_is_overloaded(env, sgs); + sgs->group_type = group_classify(group, sgs, env); + } if (sgs->sum_nr_running) sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; +} - sgs->group_weight = group->group_weight; +#ifdef CONFIG_SCHED_HMP +static bool update_sd_pick_busiest_active_balance(struct lb_env *env, + struct sd_lb_stats *sds, + struct sched_group *sg, + struct sg_lb_stats *sgs) +{ + if (env->idle != CPU_NOT_IDLE && + cpu_capacity(env->dst_cpu) > group_rq_capacity(sg)) { + if (sgs->sum_nr_big_tasks > + sds->busiest_stat.sum_nr_big_tasks) { + env->flags |= LBF_BIG_TASK_ACTIVE_BALANCE; + return true; + } + } - sgs->group_no_capacity = group_is_overloaded(env, sgs); - sgs->group_type = group_classify(group, sgs); + return false; +} +#else +static bool update_sd_pick_busiest_active_balance(struct lb_env *env, + struct sd_lb_stats *sds, + struct sched_group *sg, + struct sg_lb_stats *sgs) +{ + return false; } +#endif /** * update_sd_pick_busiest - return 1 on busiest group @@ -8535,36 +9780,47 @@ static bool update_sd_pick_busiest(struct lb_env *env, { struct sg_lb_stats *busiest = &sds->busiest_stat; + if (update_sd_pick_busiest_active_balance(env, sds, sg, sgs)) + return true; + if (sgs->group_type > busiest->group_type) return true; if (sgs->group_type < busiest->group_type) return false; - /* - * Candidate sg doesn't face any serious load-balance problems - * so don't pick it if the local sg is already filled up. 
- */ - if (sgs->group_type == group_other && - !group_has_capacity(env, &sds->local_stat)) - return false; - if (sgs->avg_load <= busiest->avg_load) return false; - if (!(env->sd->flags & SD_ASYM_CPUCAPACITY)) - goto asym_packing; - /* - * Candidate sg has no more than one task per CPU and - * has higher per-CPU capacity. Migrating tasks to less - * capable CPUs may harm throughput. Maximize throughput, - * power/energy consequences are not considered. + * Group has no more than one task per CPU */ - if (sgs->sum_nr_running <= sgs->group_weight && - group_smaller_cpu_capacity(sds->local, sg)) + if (sgs->sum_nr_running <= sgs->group_weight) return false; + if (energy_aware()) { + /* + * Candidate sg doesn't face any serious load-balance problems + * so don't pick it if the local sg is already filled up. + */ + if (sgs->group_type == group_other && + !group_has_capacity(env, &sds->local_stat)) + return false; + + if (!(env->sd->flags & SD_ASYM_CPUCAPACITY)) + goto asym_packing; + + /* + * Candidate sg has no more than one task per CPU and + * has higher per-CPU capacity. Migrating tasks to less + * capable CPUs may harm throughput. Maximize throughput, + * power/energy consequences are not considered. + */ + if (sgs->sum_nr_running <= sgs->group_weight && + group_smaller_cpu_capacity(sds->local, sg)) + return false; + } + asym_packing: /* This is the busiest node in its class. */ if (!(env->sd->flags & SD_ASYM_PACKING)) @@ -8671,14 +9927,15 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd group_has_capacity(env, &sds->local_stat) && (sgs->sum_nr_running > 1)) { sgs->group_no_capacity = 1; - sgs->group_type = group_classify(sg, sgs); + sgs->group_type = group_classify(sg, sgs, env); } /* * Ignore task groups with misfit tasks if local group has no * capacity or if per-cpu capacity isn't higher. 
*/ - if (sgs->group_type == group_misfit_task && + if (energy_aware() && + sgs->group_type == group_misfit_task && (!group_has_capacity(env, &sds->local_stat) || !group_smaller_cpu_capacity(sg, sds->local))) sgs->group_type = group_other; @@ -8686,6 +9943,8 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd if (update_sd_pick_busiest(env, sds, sg, sgs)) { sds->busiest = sg; sds->busiest_stat = *sgs; + env->busiest_nr_running = sgs->sum_nr_running; + env->busiest_grp_capacity = sgs->group_capacity; } next_group: @@ -8707,12 +9966,12 @@ next_group: env->dst_rq->rd->overload = overload; /* Update over-utilization (tipping point, U >= 0) indicator */ - if (env->dst_rq->rd->overutilized != overutilized) { + if (energy_aware() && env->dst_rq->rd->overutilized != overutilized) { env->dst_rq->rd->overutilized = overutilized; trace_sched_overutilized(overutilized); } } else { - if (!env->dst_rq->rd->overutilized && overutilized) { + if (energy_aware() && !env->dst_rq->rd->overutilized && overutilized) { env->dst_rq->rd->overutilized = true; trace_sched_overutilized(true); } @@ -8864,20 +10123,22 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s */ if (busiest->avg_load <= sds->avg_load || local->avg_load >= sds->avg_load) { - /* Misfitting tasks should be migrated in any case */ - if (busiest->group_type == group_misfit_task) { - env->imbalance = busiest->group_misfit_task; - return; - } + if (energy_aware()) { + /* Misfitting tasks should be migrated in any case */ + if (busiest->group_type == group_misfit_task) { + env->imbalance = busiest->group_misfit_task; + return; + } - /* - * Busiest group is overloaded, local is not, use the spare - * cycles to maximize throughput - */ - if (busiest->group_type == group_overloaded && - local->group_type <= group_misfit_task) { - env->imbalance = busiest->load_per_task; - return; + /* + * Busiest group is overloaded, local is not, use the spare + * cycles to maximize 
throughput + */ + if (busiest->group_type == group_overloaded && + local->group_type <= group_misfit_task) { + env->imbalance = busiest->load_per_task; + return; + } } env->imbalance = 0; @@ -8914,7 +10175,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s ) / SCHED_CAPACITY_SCALE; /* Boost imbalance to allow misfit task to be balanced. */ - if (busiest->group_type == group_misfit_task) + if (energy_aware() && busiest->group_type == group_misfit_task) env->imbalance = max_t(long, env->imbalance, busiest->group_misfit_task); @@ -8975,6 +10236,12 @@ static struct sched_group *find_busiest_group(struct lb_env *env) if (!sds.busiest || busiest->sum_nr_running == 0) goto out_balanced; + if (env->flags & LBF_BIG_TASK_ACTIVE_BALANCE) + goto force_balance; + + if (bail_inter_cluster_balance(env, &sds)) + goto out_balanced; + sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load) / sds.total_capacity; @@ -8995,7 +10262,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env) goto force_balance; /* Misfitting tasks should be dealt with regardless of the avg load */ - if (busiest->group_type == group_misfit_task) { + if (energy_aware() && busiest->group_type == group_misfit_task) { goto force_balance; } @@ -9046,6 +10313,60 @@ out_balanced: return NULL; } +#ifdef CONFIG_SCHED_HMP +static struct rq *find_busiest_queue_hmp(struct lb_env *env, + struct sched_group *group) +{ + struct rq *busiest = NULL, *busiest_big = NULL; + u64 max_runnable_avg = 0, max_runnable_avg_big = 0; + int max_nr_big = 0, nr_big; + bool find_big = !!(env->flags & LBF_BIG_TASK_ACTIVE_BALANCE); + int i; + cpumask_t cpus; + + cpumask_andnot(&cpus, sched_group_cpus(group), cpu_isolated_mask); + + for_each_cpu(i, &cpus) { + struct rq *rq = cpu_rq(i); + u64 cumulative_runnable_avg = + rq->hmp_stats.cumulative_runnable_avg; + + if (!cpumask_test_cpu(i, env->cpus)) + continue; + + + if (find_big) { + nr_big = nr_big_tasks(rq); + if (nr_big > max_nr_big || + 
(nr_big > 0 && nr_big == max_nr_big && + cumulative_runnable_avg > max_runnable_avg_big)) { + max_runnable_avg_big = cumulative_runnable_avg; + busiest_big = rq; + max_nr_big = nr_big; + continue; + } + } + + if (cumulative_runnable_avg > max_runnable_avg) { + max_runnable_avg = cumulative_runnable_avg; + busiest = rq; + } + } + + if (busiest_big) + return busiest_big; + + env->flags &= ~LBF_BIG_TASK_ACTIVE_BALANCE; + return busiest; +} +#else +static inline struct rq *find_busiest_queue_hmp(struct lb_env *env, + struct sched_group *group) +{ + return NULL; +} +#endif + /* * find_busiest_queue - find the busiest runqueue among the cpus in group. */ @@ -9056,6 +10377,10 @@ static struct rq *find_busiest_queue(struct lb_env *env, unsigned long busiest_load = 0, busiest_capacity = 1; int i; +#ifdef CONFIG_SCHED_HMP + return find_busiest_queue_hmp(env, group); +#endif + for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { unsigned long capacity, wl; enum fbq_type rt; @@ -9135,6 +10460,9 @@ static int need_active_balance(struct lb_env *env) { struct sched_domain *sd = env->sd; + if (env->flags & LBF_BIG_TASK_ACTIVE_BALANCE) + return 1; + if (env->idle == CPU_NEWLY_IDLE) { /* @@ -9159,7 +10487,8 @@ static int need_active_balance(struct lb_env *env) return 1; } - if ((capacity_of(env->src_cpu) < capacity_of(env->dst_cpu)) && + if (energy_aware() && + (capacity_of(env->src_cpu) < capacity_of(env->dst_cpu)) && ((capacity_orig_of(env->src_cpu) < capacity_orig_of(env->dst_cpu))) && env->src_rq->cfs.h_nr_running == 1 && cpu_overutilized(env->src_cpu) && @@ -9171,6 +10500,14 @@ static int need_active_balance(struct lb_env *env) sd->cache_nice_tries + NEED_ACTIVE_BALANCE_THRESHOLD); } +static int group_balance_cpu_not_isolated(struct sched_group *sg) +{ + cpumask_t cpus; + + cpumask_and(&cpus, sched_group_cpus(sg), sched_group_mask(sg)); + cpumask_andnot(&cpus, &cpus, cpu_isolated_mask); + return cpumask_first(&cpus); +} static int should_we_balance(struct lb_env *env) { @@ 
-9189,7 +10526,8 @@ static int should_we_balance(struct lb_env *env) sg_mask = sched_group_mask(sg); /* Try to find first idle cpu */ for_each_cpu_and(cpu, sg_cpus, env->cpus) { - if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu)) + if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu) || + cpu_isolated(cpu)) continue; balance_cpu = cpu; @@ -9197,7 +10535,7 @@ static int should_we_balance(struct lb_env *env) } if (balance_cpu == -1) - balance_cpu = group_balance_cpu(sg); + balance_cpu = group_balance_cpu_not_isolated(sg); /* * First idle cpu or the first cpu(busiest) in this sched group @@ -9222,16 +10560,21 @@ static int load_balance(int this_cpu, struct rq *this_rq, struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask); struct lb_env env = { - .sd = sd, - .dst_cpu = this_cpu, - .dst_rq = this_rq, - .dst_grpmask = sched_group_cpus(sd->groups), - .idle = idle, - .loop_break = sched_nr_migrate_break, - .cpus = cpus, - .fbq_type = all, - .tasks = LIST_HEAD_INIT(env.tasks), - .imbalance = 0, + .sd = sd, + .dst_cpu = this_cpu, + .dst_rq = this_rq, + .dst_grpmask = sched_group_cpus(sd->groups), + .idle = idle, + .loop_break = sched_nr_migrate_break, + .cpus = cpus, + .fbq_type = all, + .tasks = LIST_HEAD_INIT(env.tasks), + .imbalance = 0, + .flags = 0, + .loop = 0, + .busiest_nr_running = 0, + .busiest_grp_capacity = 0, + .boost_policy = sched_boost_policy(), }; /* @@ -9279,7 +10622,6 @@ redo: * correctly treated as an imbalance. */ env.flags |= LBF_ALL_PINNED; - env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); more_balance: raw_spin_lock_irqsave(&busiest->lock, flags); @@ -9292,6 +10634,12 @@ more_balance: goto no_move; } + /* + * Set loop_max when rq's lock is taken to prevent a race. 
+ */ + env.loop_max = min(sysctl_sched_nr_migrate, + busiest->cfs.h_nr_running); + /* * cur_ld_moved - load moved in current iteration * ld_moved - cumulative load moved across iterations @@ -9381,16 +10729,20 @@ more_balance: no_move: if (!ld_moved) { - schedstat_inc(sd, lb_failed[idle]); + if (!(env.flags & LBF_BIG_TASK_ACTIVE_BALANCE)) + schedstat_inc(sd, lb_failed[idle]); + /* * Increment the failure counter only on periodic balance. * We do not want newidle balance, which can be very * frequent, pollute the failure counter causing * excessive cache_hot migrations and active balances. */ - if (idle != CPU_NEWLY_IDLE) - if (env.src_grp_nr_running > 1) + if (idle != CPU_NEWLY_IDLE && + !(env.flags & LBF_BIG_TASK_ACTIVE_BALANCE)) { + if (env.src_grp_nr_running > 1) sd->nr_balance_failed++; + } if (need_active_balance(&env)) { raw_spin_lock_irqsave(&busiest->lock, flags); @@ -9412,7 +10764,8 @@ no_move: * ->active_balance_work. Once set, it's cleared * only after active load balance is finished. 
*/ - if (!busiest->active_balance) { + if (!busiest->active_balance && + !cpu_isolated(cpu_of(busiest))) { busiest->active_balance = 1; busiest->push_cpu = this_cpu; active_balance = 1; @@ -9437,6 +10790,16 @@ no_move: } else { sd->nr_balance_failed = 0; + /* Assumes one 'busiest' cpu that we pulled tasks from */ + if (!same_freq_domain(this_cpu, cpu_of(busiest))) { + int check_groups = !!(env.flags & + LBF_MOVED_RELATED_THREAD_GROUP_TASK); + + check_for_freq_change(this_rq, false, check_groups); + check_for_freq_change(busiest, false, check_groups); + } else { + check_for_freq_change(this_rq, true, false); + } } if (likely(!active_balance)) { /* We were unbalanced, so reset the balancing interval */ @@ -9542,6 +10905,9 @@ static int idle_balance(struct rq *this_rq) int pulled_task = 0; u64 curr_cost = 0; + if (cpu_isolated(this_cpu)) + return 0; + idle_enter_fair(this_rq); /* @@ -9650,16 +11016,22 @@ static int active_load_balance_cpu_stop(void *data) struct rq *target_rq = cpu_rq(target_cpu); struct sched_domain *sd = NULL; struct task_struct *p = NULL; - struct task_struct *push_task; + struct task_struct *push_task = NULL; int push_task_detached = 0; struct lb_env env = { - .sd = sd, - .dst_cpu = target_cpu, - .dst_rq = target_rq, - .src_cpu = busiest_rq->cpu, - .src_rq = busiest_rq, - .idle = CPU_IDLE, + .sd = sd, + .dst_cpu = target_cpu, + .dst_rq = target_rq, + .src_cpu = busiest_rq->cpu, + .src_rq = busiest_rq, + .idle = CPU_IDLE, + .busiest_nr_running = 0, + .busiest_grp_capacity = 0, + .flags = 0, + .loop = 0, + .boost_policy = sched_boost_policy(), }; + bool moved = false; raw_spin_lock_irq(&busiest_rq->lock); @@ -9680,11 +11052,15 @@ static int active_load_balance_cpu_stop(void *data) BUG_ON(busiest_rq == target_rq); push_task = busiest_rq->push_task; + target_cpu = busiest_rq->push_cpu; if (push_task) { if (task_on_rq_queued(push_task) && - task_cpu(push_task) == busiest_cpu) { + push_task->state == TASK_RUNNING && + task_cpu(push_task) == busiest_cpu 
&& + cpu_online(target_cpu)) { detach_task(push_task, &env); push_task_detached = 1; + moved = true; } goto out_unlock; } @@ -9705,6 +11081,7 @@ static int active_load_balance_cpu_stop(void *data) p = detach_one_task(&env); if (p) { schedstat_inc(sd, alb_pushed); + moved = true; } else { schedstat_inc(sd, alb_failed); } @@ -9712,6 +11089,8 @@ static int active_load_balance_cpu_stop(void *data) rcu_read_unlock(); out_unlock: busiest_rq->active_balance = 0; + push_task = busiest_rq->push_task; + target_cpu = busiest_rq->push_cpu; if (push_task) busiest_rq->push_task = NULL; @@ -9722,11 +11101,21 @@ out_unlock: if (push_task_detached) attach_one_task(target_rq, push_task); put_task_struct(push_task); + clear_reserved(target_cpu); } if (p) attach_one_task(target_rq, p); + if (moved && !same_freq_domain(busiest_cpu, target_cpu)) { + int check_groups = !!(env.flags & + LBF_MOVED_RELATED_THREAD_GROUP_TASK); + check_for_freq_change(busiest_rq, false, check_groups); + check_for_freq_change(target_rq, false, check_groups); + } else if (moved) { + check_for_freq_change(target_rq, true, false); + } + local_irq_enable(); return 0; @@ -9745,10 +11134,47 @@ static inline int on_null_domain(struct rq *rq) * load balancing for all the idle CPUs. 
*/ -static inline int find_new_ilb(void) +#ifdef CONFIG_SCHED_HMP +static inline int find_new_hmp_ilb(int type) +{ + int call_cpu = raw_smp_processor_id(); + struct sched_domain *sd; + int ilb; + + rcu_read_lock(); + + /* Pick an idle cpu "closest" to call_cpu */ + for_each_domain(call_cpu, sd) { + for_each_cpu_and(ilb, nohz.idle_cpus_mask, + sched_domain_span(sd)) { + if (idle_cpu(ilb) && (type != NOHZ_KICK_RESTRICT || + cpu_max_power_cost(ilb) <= + cpu_max_power_cost(call_cpu))) { + rcu_read_unlock(); + reset_balance_interval(ilb); + return ilb; + } + } + } + + rcu_read_unlock(); + return nr_cpu_ids; +} +#else /* CONFIG_SCHED_HMP */ +static inline int find_new_hmp_ilb(int type) +{ + return 0; +} +#endif /* CONFIG_SCHED_HMP */ + +static inline int find_new_ilb(int type) { int ilb; +#ifdef CONFIG_SCHED_HMP + return find_new_hmp_ilb(type); +#endif + ilb = cpumask_first(nohz.idle_cpus_mask); if (ilb < nr_cpu_ids && idle_cpu(ilb)) @@ -9762,13 +11188,13 @@ static inline int find_new_ilb(void) * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle * CPU (if there is one). */ -static void nohz_balancer_kick(void) +static void nohz_balancer_kick(int type) { int ilb_cpu; nohz.next_balance++; - ilb_cpu = find_new_ilb(); + ilb_cpu = find_new_ilb(type); if (ilb_cpu >= nr_cpu_ids) return; @@ -9856,7 +11282,7 @@ void nohz_balance_enter_idle(int cpu) /* * If we're a completely isolated CPU, we don't play. 
*/ - if (on_null_domain(cpu_rq(cpu))) + if (on_null_domain(cpu_rq(cpu)) || cpu_isolated(cpu)) return; cpumask_set_cpu(cpu, nohz.idle_cpus_mask); @@ -9885,7 +11311,13 @@ static DEFINE_SPINLOCK(balancing); */ void update_max_interval(void) { - max_load_balance_interval = HZ*num_online_cpus()/10; + cpumask_t avail_mask; + unsigned int available_cpus; + + cpumask_andnot(&avail_mask, cpu_online_mask, cpu_isolated_mask); + available_cpus = cpumask_weight(&avail_mask); + + max_load_balance_interval = HZ*available_cpus/10; } /* @@ -10010,12 +11442,15 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) /* Earliest time when we have to do rebalance again */ unsigned long next_balance = jiffies + 60*HZ; int update_next_balance = 0; + cpumask_t cpus; if (idle != CPU_IDLE || !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu))) goto end; - for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { + cpumask_andnot(&cpus, nohz.idle_cpus_mask, cpu_isolated_mask); + + for_each_cpu(balance_cpu, &cpus) { if (balance_cpu == this_cpu || !idle_cpu(balance_cpu)) continue; @@ -10058,6 +11493,95 @@ end: clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)); } +#ifdef CONFIG_SCHED_HMP +static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type) +{ + struct sched_domain *sd; + int i; + + if (rq->nr_running < 2) + return 0; + + if (!sysctl_sched_restrict_cluster_spill || + sched_boost_policy() == SCHED_BOOST_ON_ALL) + return 1; + + if (unlikely(rq->nr_pinned_tasks > 0)) { + int delta = rq->nr_running - rq->nr_pinned_tasks; + + /* + * Check if it is possible to "unload" this CPU in case + * of having pinned/affine tasks. 
Do not disturb idle + * core if one of the below condition is true: + * + * - there is one pinned task and it is not "current" + * - all tasks are pinned to this CPU + */ + if (delta < 2) + if (current->nr_cpus_allowed > 1 || !delta) + return 0; + } + + if (cpu_max_power_cost(cpu) == max_power_cost) + return 1; + + rcu_read_lock(); + sd = rcu_dereference_check_sched_domain(rq->sd); + if (!sd) { + rcu_read_unlock(); + return 0; + } + + for_each_cpu(i, sched_domain_span(sd)) { + if (cpu_load(i) < sched_spill_load && + cpu_rq(i)->nr_running < + sysctl_sched_spill_nr_run) { + /* Change the kick type to limit to CPUs that + * are of equal or lower capacity. + */ + *type = NOHZ_KICK_RESTRICT; + break; + } + } + rcu_read_unlock(); + return 1; +} +#else +static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type) +{ + return 0; +} +#endif + +static inline int _nohz_kick_needed(struct rq *rq, int cpu, int *type) +{ + unsigned long now = jiffies; + + /* + * None are in tickless mode and hence no need for NOHZ idle load + * balancing. + */ + if (likely(!atomic_read(&nohz.nr_cpus))) + return 0; + +#ifdef CONFIG_SCHED_HMP + return _nohz_kick_needed_hmp(rq, cpu, type); +#endif + + if (time_before(now, nohz.next_balance)) + return 0; + + if (rq->nr_running >= 2 && + (!energy_aware() || cpu_overutilized(cpu))) + return true; + + /* Do idle load balance if there have misfit task */ + if (energy_aware()) + return rq->misfit_task; + + return (rq->nr_running >= 2); +} + /* * Current heuristic for kicking the idle load balancer in the presence * of an idle cpu in the system. @@ -10069,12 +11593,14 @@ end: * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler * domain span are idle. 
*/ -static inline bool nohz_kick_needed(struct rq *rq) +static inline bool nohz_kick_needed(struct rq *rq, int *type) { - unsigned long now = jiffies; +#ifndef CONFIG_SCHED_HMP struct sched_domain *sd; struct sched_group_capacity *sgc; - int nr_busy, cpu = rq->cpu; + int nr_busy; +#endif + int cpu = rq->cpu; bool kick = false; if (unlikely(rq->idle_balance)) @@ -10087,24 +11613,10 @@ static inline bool nohz_kick_needed(struct rq *rq) set_cpu_sd_state_busy(); nohz_balance_exit_idle(cpu); - /* - * None are in tickless mode and hence no need for NOHZ idle load - * balancing. - */ - if (likely(!atomic_read(&nohz.nr_cpus))) - return false; - - if (time_before(now, nohz.next_balance)) - return false; - - if (rq->nr_running >= 2 && - (!energy_aware() || cpu_overutilized(cpu))) + if (_nohz_kick_needed(rq, cpu, type)) return true; - /* Do idle load balance if there have misfit task */ - if (energy_aware()) - return rq->misfit_task; - +#ifndef CONFIG_SCHED_HMP rcu_read_lock(); sd = rcu_dereference(per_cpu(sd_busy, cpu)); if (sd) { @@ -10136,6 +11648,7 @@ static inline bool nohz_kick_needed(struct rq *rq) unlock: rcu_read_unlock(); +#endif return kick; } #else @@ -10169,15 +11682,19 @@ static void run_rebalance_domains(struct softirq_action *h) */ void trigger_load_balance(struct rq *rq) { - /* Don't need to rebalance while attached to NULL domain */ - if (unlikely(on_null_domain(rq))) + int type = NOHZ_KICK_ANY; + + /* Don't need to rebalance while attached to NULL domain or + * cpu is isolated. 
+ */ + if (unlikely(on_null_domain(rq)) || cpu_isolated(cpu_of(rq))) return; if (time_after_eq(jiffies, rq->next_balance)) raise_softirq(SCHED_SOFTIRQ); #ifdef CONFIG_NO_HZ_COMMON - if (nohz_kick_needed(rq)) - nohz_balancer_kick(); + if (nohz_kick_needed(rq, &type)) + nohz_balancer_kick(type); #endif } @@ -10196,47 +11713,6 @@ static void rq_offline_fair(struct rq *rq) unthrottle_offline_cfs_rqs(rq); } -static inline int -kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu) -{ - int rc = 0; - - /* Invoke active balance to force migrate currently running task */ - raw_spin_lock(&rq->lock); - if (!rq->active_balance) { - rq->active_balance = 1; - rq->push_cpu = new_cpu; - get_task_struct(p); - rq->push_task = p; - rc = 1; - } - raw_spin_unlock(&rq->lock); - - return rc; -} - -void check_for_migration(struct rq *rq, struct task_struct *p) -{ - int new_cpu; - int active_balance; - int cpu = task_cpu(p); - - if (rq->misfit_task) { - if (rq->curr->state != TASK_RUNNING || - rq->curr->nr_cpus_allowed == 1) - return; - - new_cpu = select_energy_cpu_brute(p, cpu, 0); - if (capacity_orig_of(new_cpu) > capacity_orig_of(cpu)) { - active_balance = kick_active_balance(rq, p, new_cpu); - if (active_balance) - stop_one_cpu_nowait(cpu, - active_load_balance_cpu_stop, - rq, &rq->active_balance_work); - } - } -} - #endif /* CONFIG_SMP */ /* @@ -10256,7 +11732,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) task_tick_numa(rq, curr); #ifdef CONFIG_SMP - if (!rq->rd->overutilized && cpu_overutilized(task_cpu(curr))) { + if (energy_aware() && + !rq->rd->overutilized && cpu_overutilized(task_cpu(curr))) { rq->rd->overutilized = true; trace_sched_overutilized(true); } @@ -10344,8 +11821,7 @@ static inline bool vruntime_normalized(struct task_struct *p) * - A task which has been woken up by try_to_wake_up() and * waiting for actually being woken up by sched_ttwu_pending(). 
*/ - if (!se->sum_exec_runtime || - (p->state == TASK_WAKING && p->sched_class == &fair_sched_class)) + if (!se->sum_exec_runtime || p->state == TASK_WAKING) return true; return false; @@ -10757,6 +12233,11 @@ const struct sched_class fair_sched_class = { #ifdef CONFIG_FAIR_GROUP_SCHED .task_change_group = task_change_group_fair, #endif +#ifdef CONFIG_SCHED_HMP + .inc_hmp_sched_stats = inc_hmp_sched_stats_fair, + .dec_hmp_sched_stats = dec_hmp_sched_stats_fair, + .fixup_hmp_sched_stats = fixup_hmp_sched_stats_fair, +#endif }; #ifdef CONFIG_SCHED_DEBUG diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 5b92acacc9fc..c30c48fde7e6 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -78,10 +78,3 @@ SCHED_FEAT(ENERGY_AWARE, true) #else SCHED_FEAT(ENERGY_AWARE, false) #endif -/* - * Minimum capacity capping. Keep track of minimum capacity factor when - * minimum frequency available to a policy is modified. - * If enabled, this can be used to inform the scheduler about capacity - * restrictions. - */ -SCHED_FEAT(MIN_CAPACITY_CAPPING, true) diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c new file mode 100644 index 000000000000..0c57468c7a22 --- /dev/null +++ b/kernel/sched/hmp.c @@ -0,0 +1,4496 @@ +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Implementation credits: Srivatsa Vaddagiri, Steve Muckle + * Syed Rameez Mustafa, Olav haugan, Joonwoo Park, Pavan Kumar Kondeti + * and Vikram Mulukutla + */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2015 Sony Mobile Communications Inc, + * and licensed under the license of the file. + */ + +#include +#include +#include + +#include "sched.h" + +#include + +#define CSTATE_LATENCY_GRANULARITY_SHIFT (6) + +const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK", + "TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE", + "IRQ_UPDATE"}; + +const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP"}; + +static ktime_t ktime_last; +static bool sched_ktime_suspended; + +static bool use_cycle_counter; +static struct cpu_cycle_counter_cb cpu_cycle_counter_cb; + +u64 sched_ktime_clock(void) +{ + if (unlikely(sched_ktime_suspended)) + return ktime_to_ns(ktime_last); + return ktime_get_ns(); +} + +static void sched_resume(void) +{ + sched_ktime_suspended = false; +} + +static int sched_suspend(void) +{ + ktime_last = ktime_get(); + sched_ktime_suspended = true; + return 0; +} + +static struct syscore_ops sched_syscore_ops = { + .resume = sched_resume, + .suspend = sched_suspend +}; + +static int __init sched_init_ops(void) +{ + register_syscore_ops(&sched_syscore_ops); + return 0; +} +late_initcall(sched_init_ops); + +inline void clear_ed_task(struct task_struct *p, struct rq *rq) +{ + if (p == rq->ed_task) + rq->ed_task = NULL; +} + +inline void set_task_last_switch_out(struct task_struct *p, u64 wallclock) +{ + p->last_switch_out_ts = wallclock; +} + +/* + * Note C-state for (idle) cpus. 
+ * + * @cstate = cstate index, 0 -> active state + * @wakeup_energy = energy spent in waking up cpu + * @wakeup_latency = latency to wakeup from cstate + * + */ +void +sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency) +{ + struct rq *rq = cpu_rq(cpu); + + rq->cstate = cstate; /* C1, C2 etc */ + rq->wakeup_energy = wakeup_energy; + /* disregard small latency delta (64 us). */ + rq->wakeup_latency = ((wakeup_latency >> + CSTATE_LATENCY_GRANULARITY_SHIFT) << + CSTATE_LATENCY_GRANULARITY_SHIFT); +} + +/* + * Note D-state for (idle) cluster. + * + * @dstate = dstate index, 0 -> active state + * @wakeup_energy = energy spent in waking up cluster + * @wakeup_latency = latency to wakeup from cluster + * + */ +void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate, + int wakeup_energy, int wakeup_latency) +{ + struct sched_cluster *cluster = + cpu_rq(cpumask_first(cluster_cpus))->cluster; + cluster->dstate = dstate; + cluster->dstate_wakeup_energy = wakeup_energy; + cluster->dstate_wakeup_latency = wakeup_latency; +} + +u32 __weak get_freq_max_load(int cpu, u32 freq) +{ + /* 100% by default */ + return 100; +} + +struct freq_max_load_entry { + /* The maximum load which has accounted governor's headroom. 
*/ + u64 hdemand; +}; + +struct freq_max_load { + struct rcu_head rcu; + int length; + struct freq_max_load_entry freqs[0]; +}; + +static DEFINE_PER_CPU(struct freq_max_load *, freq_max_load); +static DEFINE_SPINLOCK(freq_max_load_lock); +static DEFINE_PER_CPU(u64, prev_group_runnable_sum); + +struct cpu_pwr_stats __weak *get_cpu_pwr_stats(void) +{ + return NULL; +} + +int sched_update_freq_max_load(const cpumask_t *cpumask) +{ + int i, cpu, ret; + unsigned int freq; + struct cpu_pstate_pwr *costs; + struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats(); + struct freq_max_load *max_load, *old_max_load; + struct freq_max_load_entry *entry; + u64 max_demand_capacity, max_demand; + unsigned long flags; + u32 hfreq; + int hpct; + + if (!per_cpu_info) + return 0; + + spin_lock_irqsave(&freq_max_load_lock, flags); + max_demand_capacity = div64_u64(max_task_load(), max_possible_capacity); + for_each_cpu(cpu, cpumask) { + if (!per_cpu_info[cpu].ptable) { + ret = -EINVAL; + goto fail; + } + + old_max_load = rcu_dereference(per_cpu(freq_max_load, cpu)); + + /* + * allocate len + 1 and leave the last power cost as 0 for + * power_cost() can stop iterating index when + * per_cpu_info[cpu].len > len of max_load due to race between + * cpu power stats update and get_cpu_pwr_stats(). 
+ */ + max_load = kzalloc(sizeof(struct freq_max_load) + + sizeof(struct freq_max_load_entry) * + (per_cpu_info[cpu].len + 1), GFP_ATOMIC); + if (unlikely(!max_load)) { + ret = -ENOMEM; + goto fail; + } + + max_load->length = per_cpu_info[cpu].len; + + max_demand = max_demand_capacity * + cpu_max_possible_capacity(cpu); + + i = 0; + costs = per_cpu_info[cpu].ptable; + while (costs[i].freq) { + entry = &max_load->freqs[i]; + freq = costs[i].freq; + hpct = get_freq_max_load(cpu, freq); + if (hpct <= 0 || hpct > 100) + hpct = 100; + hfreq = div64_u64((u64)freq * hpct, 100); + entry->hdemand = + div64_u64(max_demand * hfreq, + cpu_max_possible_freq(cpu)); + i++; + } + + rcu_assign_pointer(per_cpu(freq_max_load, cpu), max_load); + if (old_max_load) + kfree_rcu(old_max_load, rcu); + } + + spin_unlock_irqrestore(&freq_max_load_lock, flags); + return 0; + +fail: + for_each_cpu(cpu, cpumask) { + max_load = rcu_dereference(per_cpu(freq_max_load, cpu)); + if (max_load) { + rcu_assign_pointer(per_cpu(freq_max_load, cpu), NULL); + kfree_rcu(max_load, rcu); + } + } + + spin_unlock_irqrestore(&freq_max_load_lock, flags); + return ret; +} + +unsigned int max_possible_efficiency = 1; +unsigned int min_possible_efficiency = UINT_MAX; + +unsigned long __weak arch_get_cpu_efficiency(int cpu) +{ + return SCHED_LOAD_SCALE; +} + +/* Keep track of max/min capacity possible across CPUs "currently" */ +static void __update_min_max_capacity(void) +{ + int i; + int max_cap = 0, min_cap = INT_MAX; + + for_each_online_cpu(i) { + max_cap = max(max_cap, cpu_capacity(i)); + min_cap = min(min_cap, cpu_capacity(i)); + } + + max_capacity = max_cap; + min_capacity = min_cap; +} + +static void update_min_max_capacity(void) +{ + unsigned long flags; + int i; + + local_irq_save(flags); + for_each_possible_cpu(i) + raw_spin_lock(&cpu_rq(i)->lock); + + __update_min_max_capacity(); + + for_each_possible_cpu(i) + raw_spin_unlock(&cpu_rq(i)->lock); + local_irq_restore(flags); +} + +/* + * Return 'capacity' of 
a cpu in reference to "least" efficient cpu, such that + * least efficient cpu gets capacity of 1024 + */ +static unsigned long +capacity_scale_cpu_efficiency(struct sched_cluster *cluster) +{ + return (1024 * cluster->efficiency) / min_possible_efficiency; +} + +/* + * Return 'capacity' of a cpu in reference to cpu with lowest max_freq + * (min_max_freq), such that one with lowest max_freq gets capacity of 1024. + */ +static unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster) +{ + return (1024 * cluster_max_freq(cluster)) / min_max_freq; +} + +/* + * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so + * that "most" efficient cpu gets a load_scale_factor of 1 + */ +static inline unsigned long +load_scale_cpu_efficiency(struct sched_cluster *cluster) +{ + return DIV_ROUND_UP(1024 * max_possible_efficiency, + cluster->efficiency); +} + +/* + * Return load_scale_factor of a cpu in reference to cpu with best max_freq + * (max_possible_freq), so that one with best max_freq gets a load_scale_factor + * of 1. + */ +static inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster) +{ + return DIV_ROUND_UP(1024 * max_possible_freq, + cluster_max_freq(cluster)); +} + +static int compute_capacity(struct sched_cluster *cluster) +{ + int capacity = 1024; + + capacity *= capacity_scale_cpu_efficiency(cluster); + capacity >>= 10; + + capacity *= capacity_scale_cpu_freq(cluster); + capacity >>= 10; + + return capacity; +} + +static int compute_max_possible_capacity(struct sched_cluster *cluster) +{ + int capacity = 1024; + + capacity *= capacity_scale_cpu_efficiency(cluster); + capacity >>= 10; + + capacity *= (1024 * cluster->max_possible_freq) / min_max_freq; + capacity >>= 10; + + return capacity; +} + +static int compute_load_scale_factor(struct sched_cluster *cluster) +{ + int load_scale = 1024; + + /* + * load_scale_factor accounts for the fact that task load + * is in reference to "best" performing cpu. 
Task's load will need to be + * scaled (up) by a factor to determine suitability to be placed on a + * (little) cpu. + */ + load_scale *= load_scale_cpu_efficiency(cluster); + load_scale >>= 10; + + load_scale *= load_scale_cpu_freq(cluster); + load_scale >>= 10; + + return load_scale; +} + +struct list_head cluster_head; +static DEFINE_MUTEX(cluster_lock); +static cpumask_t all_cluster_cpus = CPU_MASK_NONE; +DECLARE_BITMAP(all_cluster_ids, NR_CPUS); +struct sched_cluster *sched_cluster[NR_CPUS]; +int num_clusters; + +unsigned int max_power_cost = 1; + +struct sched_cluster init_cluster = { + .list = LIST_HEAD_INIT(init_cluster.list), + .id = 0, + .max_power_cost = 1, + .min_power_cost = 1, + .capacity = 1024, + .max_possible_capacity = 1024, + .efficiency = 1, + .load_scale_factor = 1024, + .cur_freq = 1, + .max_freq = 1, + .max_mitigated_freq = UINT_MAX, + .min_freq = 1, + .max_possible_freq = 1, + .dstate = 0, + .dstate_wakeup_energy = 0, + .dstate_wakeup_latency = 0, + .exec_scale_factor = 1024, + .wake_up_idle = 0, +}; + +static void update_all_clusters_stats(void) +{ + struct sched_cluster *cluster; + u64 highest_mpc = 0, lowest_mpc = U64_MAX; + + pre_big_task_count_change(cpu_possible_mask); + + for_each_sched_cluster(cluster) { + u64 mpc; + + cluster->capacity = compute_capacity(cluster); + mpc = cluster->max_possible_capacity = + compute_max_possible_capacity(cluster); + cluster->load_scale_factor = compute_load_scale_factor(cluster); + + cluster->exec_scale_factor = + DIV_ROUND_UP(cluster->efficiency * 1024, + max_possible_efficiency); + + if (mpc > highest_mpc) + highest_mpc = mpc; + + if (mpc < lowest_mpc) + lowest_mpc = mpc; + } + + max_possible_capacity = highest_mpc; + min_max_possible_capacity = lowest_mpc; + + __update_min_max_capacity(); + sched_update_freq_max_load(cpu_possible_mask); + post_big_task_count_change(cpu_possible_mask); +} + +static void assign_cluster_ids(struct list_head *head) +{ + struct sched_cluster *cluster; + int pos = 0; + + 
list_for_each_entry(cluster, head, list) { + cluster->id = pos; + sched_cluster[pos++] = cluster; + } +} + +static void +move_list(struct list_head *dst, struct list_head *src, bool sync_rcu) +{ + struct list_head *first, *last; + + first = src->next; + last = src->prev; + + if (sync_rcu) { + INIT_LIST_HEAD_RCU(src); + synchronize_rcu(); + } + + first->prev = dst; + dst->prev = last; + last->next = dst; + + /* Ensure list sanity before making the head visible to all CPUs. */ + smp_mb(); + dst->next = first; +} + +static int +compare_clusters(void *priv, struct list_head *a, struct list_head *b) +{ + struct sched_cluster *cluster1, *cluster2; + int ret; + + cluster1 = container_of(a, struct sched_cluster, list); + cluster2 = container_of(b, struct sched_cluster, list); + + /* + * Don't assume higher capacity means higher power. If the + * power cost is same, sort the higher capacity cluster before + * the lower capacity cluster to start placing the tasks + * on the higher capacity cluster. + */ + ret = cluster1->max_power_cost > cluster2->max_power_cost || + (cluster1->max_power_cost == cluster2->max_power_cost && + cluster1->max_possible_capacity < + cluster2->max_possible_capacity); + + return ret; +} + +static void sort_clusters(void) +{ + struct sched_cluster *cluster; + struct list_head new_head; + unsigned int tmp_max = 1; + + INIT_LIST_HEAD(&new_head); + + for_each_sched_cluster(cluster) { + cluster->max_power_cost = power_cost(cluster_first_cpu(cluster), + max_task_load()); + cluster->min_power_cost = power_cost(cluster_first_cpu(cluster), + 0); + + if (cluster->max_power_cost > tmp_max) + tmp_max = cluster->max_power_cost; + } + max_power_cost = tmp_max; + + move_list(&new_head, &cluster_head, true); + + list_sort(NULL, &new_head, compare_clusters); + assign_cluster_ids(&new_head); + + /* + * Ensure cluster ids are visible to all CPUs before making + * cluster_head visible. 
+ */ + move_list(&cluster_head, &new_head, false); +} + +static void +insert_cluster(struct sched_cluster *cluster, struct list_head *head) +{ + struct sched_cluster *tmp; + struct list_head *iter = head; + + list_for_each_entry(tmp, head, list) { + if (cluster->max_power_cost < tmp->max_power_cost) + break; + iter = &tmp->list; + } + + list_add(&cluster->list, iter); +} + +static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus) +{ + struct sched_cluster *cluster = NULL; + + cluster = kzalloc(sizeof(struct sched_cluster), GFP_ATOMIC); + if (!cluster) { + __WARN_printf("Cluster allocation failed. \ + Possible bad scheduling\n"); + return NULL; + } + + INIT_LIST_HEAD(&cluster->list); + cluster->max_power_cost = 1; + cluster->min_power_cost = 1; + cluster->capacity = 1024; + cluster->max_possible_capacity = 1024; + cluster->efficiency = 1; + cluster->load_scale_factor = 1024; + cluster->cur_freq = 1; + cluster->max_freq = 1; + cluster->max_mitigated_freq = UINT_MAX; + cluster->min_freq = 1; + cluster->max_possible_freq = 1; + cluster->dstate = 0; + cluster->dstate_wakeup_energy = 0; + cluster->dstate_wakeup_latency = 0; + cluster->freq_init_done = false; + + raw_spin_lock_init(&cluster->load_lock); + cluster->cpus = *cpus; + cluster->efficiency = arch_get_cpu_efficiency(cpumask_first(cpus)); + + if (cluster->efficiency > max_possible_efficiency) + max_possible_efficiency = cluster->efficiency; + if (cluster->efficiency < min_possible_efficiency) + min_possible_efficiency = cluster->efficiency; + + atomic_set(&cluster->notifier_sent, 0); + return cluster; +} + +static void add_cluster(const struct cpumask *cpus, struct list_head *head) +{ + struct sched_cluster *cluster = alloc_new_cluster(cpus); + int i; + + if (!cluster) + return; + + for_each_cpu(i, cpus) + cpu_rq(i)->cluster = cluster; + + insert_cluster(cluster, head); + set_bit(num_clusters, all_cluster_ids); + num_clusters++; +} + +void update_cluster_topology(void) +{ + struct cpumask cpus = 
*cpu_possible_mask; + const struct cpumask *cluster_cpus; + struct list_head new_head; + int i; + + INIT_LIST_HEAD(&new_head); + + for_each_cpu(i, &cpus) { + cluster_cpus = cpu_coregroup_mask(i); + cpumask_or(&all_cluster_cpus, &all_cluster_cpus, cluster_cpus); + cpumask_andnot(&cpus, &cpus, cluster_cpus); + add_cluster(cluster_cpus, &new_head); + } + + assign_cluster_ids(&new_head); + + /* + * Ensure cluster ids are visible to all CPUs before making + * cluster_head visible. + */ + move_list(&cluster_head, &new_head, false); + update_all_clusters_stats(); +} + +void init_clusters(void) +{ + int cpu; + + bitmap_clear(all_cluster_ids, 0, NR_CPUS); + init_cluster.cpus = *cpu_possible_mask; + atomic_set(&init_cluster.notifier_sent, 0); + raw_spin_lock_init(&init_cluster.load_lock); + INIT_LIST_HEAD(&cluster_head); + + for_each_possible_cpu(cpu) + per_cpu(prev_group_runnable_sum, cpu) = 0; +} + +int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb) +{ + mutex_lock(&cluster_lock); + if (!cb->get_cpu_cycle_counter) { + mutex_unlock(&cluster_lock); + return -EINVAL; + } + + cpu_cycle_counter_cb = *cb; + use_cycle_counter = true; + mutex_unlock(&cluster_lock); + + return 0; +} + +/* Clear any HMP scheduler related requests pending from or on cpu */ +void clear_hmp_request(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + unsigned long flags; + + clear_boost_kick(cpu); + clear_reserved(cpu); + if (rq->push_task) { + struct task_struct *push_task = NULL; + + raw_spin_lock_irqsave(&rq->lock, flags); + if (rq->push_task) { + clear_reserved(rq->push_cpu); + push_task = rq->push_task; + rq->push_task = NULL; + } + rq->active_balance = 0; + raw_spin_unlock_irqrestore(&rq->lock, flags); + if (push_task) + put_task_struct(push_task); + } +} + +int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost) +{ + struct rq *rq = cpu_rq(cpu); + + rq->static_cpu_pwr_cost = cost; + return 0; +} + +unsigned int sched_get_static_cpu_pwr_cost(int cpu) +{ + return 
cpu_rq(cpu)->static_cpu_pwr_cost; +} + +int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost) +{ + struct sched_cluster *cluster = cpu_rq(cpu)->cluster; + + cluster->static_cluster_pwr_cost = cost; + return 0; +} + +unsigned int sched_get_static_cluster_pwr_cost(int cpu) +{ + return cpu_rq(cpu)->cluster->static_cluster_pwr_cost; +} + +int sched_set_cluster_wake_idle(int cpu, unsigned int wake_idle) +{ + struct sched_cluster *cluster = cpu_rq(cpu)->cluster; + + cluster->wake_up_idle = !!wake_idle; + return 0; +} + +unsigned int sched_get_cluster_wake_idle(int cpu) +{ + return cpu_rq(cpu)->cluster->wake_up_idle; +} + +/* + * sched_window_stats_policy and sched_ravg_hist_size have a 'sysctl' copy + * associated with them. This is required for atomic update of those variables + * when being modifed via sysctl interface. + * + * IMPORTANT: Initialize both copies to same value!! + */ + +/* + * Tasks that are runnable continuously for a period greather than + * EARLY_DETECTION_DURATION can be flagged early as potential + * high load tasks. + */ +#define EARLY_DETECTION_DURATION 9500000 + +static __read_mostly unsigned int sched_ravg_hist_size = 5; +__read_mostly unsigned int sysctl_sched_ravg_hist_size = 5; + +static __read_mostly unsigned int sched_window_stats_policy = + WINDOW_STATS_MAX_RECENT_AVG; +__read_mostly unsigned int sysctl_sched_window_stats_policy = + WINDOW_STATS_MAX_RECENT_AVG; + +#define SCHED_ACCOUNT_WAIT_TIME 1 + +__read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC); + +/* + * Enable colocation and frequency aggregation for all threads in a process. + * The children inherits the group id from the parent. + */ +unsigned int __read_mostly sysctl_sched_enable_thread_grouping; + + +#define SCHED_NEW_TASK_WINDOWS 5 + +#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0 + +/* + * This governs what load needs to be used when reporting CPU busy time + * to the cpufreq governor. 
+ */ +__read_mostly unsigned int sysctl_sched_freq_reporting_policy; + +/* + * For increase, send notification if + * freq_required - cur_freq > sysctl_sched_freq_inc_notify + */ +__read_mostly int sysctl_sched_freq_inc_notify = 10 * 1024 * 1024; /* + 10GHz */ + +/* + * For decrease, send notification if + * cur_freq - freq_required > sysctl_sched_freq_dec_notify + */ +__read_mostly int sysctl_sched_freq_dec_notify = 10 * 1024 * 1024; /* - 10GHz */ + +static __read_mostly unsigned int sched_io_is_busy; + +__read_mostly unsigned int sysctl_sched_pred_alert_freq = 10 * 1024 * 1024; + +/* + * Maximum possible frequency across all cpus. Task demand and cpu + * capacity (cpu_power) metrics are scaled in reference to it. + */ +unsigned int max_possible_freq = 1; + +/* + * Minimum possible max_freq across all cpus. This will be same as + * max_possible_freq on homogeneous systems and could be different from + * max_possible_freq on heterogenous systems. min_max_freq is used to derive + * capacity (cpu_power) of cpus. 
+ */ +unsigned int min_max_freq = 1; + +unsigned int max_capacity = 1024; /* max(rq->capacity) */ +unsigned int min_capacity = 1024; /* min(rq->capacity) */ +unsigned int max_possible_capacity = 1024; /* max(rq->max_possible_capacity) */ +unsigned int +min_max_possible_capacity = 1024; /* min(rq->max_possible_capacity) */ + +/* Min window size (in ns) = 10ms */ +#define MIN_SCHED_RAVG_WINDOW 10000000 + +/* Max window size (in ns) = 1s */ +#define MAX_SCHED_RAVG_WINDOW 1000000000 + +/* Window size (in ns) */ +__read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW; + +/* Maximum allowed threshold before freq aggregation must be enabled */ +#define MAX_FREQ_AGGR_THRESH 1000 + +/* Temporarily disable window-stats activity on all cpus */ +unsigned int __read_mostly sched_disable_window_stats; + +struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID]; +static LIST_HEAD(active_related_thread_groups); +static DEFINE_RWLOCK(related_thread_group_lock); + +#define for_each_related_thread_group(grp) \ + list_for_each_entry(grp, &active_related_thread_groups, list) + +/* + * Task load is categorized into buckets for the purpose of top task tracking. + * The entire range of load from 0 to sched_ravg_window needs to be covered + * in NUM_LOAD_INDICES number of buckets. Therefore the size of each bucket + * is given by sched_ravg_window / NUM_LOAD_INDICES. Since the default value + * of sched_ravg_window is MIN_SCHED_RAVG_WINDOW, use that to compute + * sched_load_granule. + */ +__read_mostly unsigned int sched_load_granule = + MIN_SCHED_RAVG_WINDOW / NUM_LOAD_INDICES; + +/* Size of bitmaps maintained to track top tasks */ +static const unsigned int top_tasks_bitmap_size = + BITS_TO_LONGS(NUM_LOAD_INDICES + 1) * sizeof(unsigned long); + +/* + * Demand aggregation for frequency purpose: + * + * 'sched_freq_aggregate' controls aggregation of cpu demand of related threads + * for frequency determination purpose. This aggregation is done per-cluster. 
+ * + * CPU demand of tasks from various related groups is aggregated per-cluster and + * added to the "max_busy_cpu" in that cluster, where max_busy_cpu is determined + * by just rq->prev_runnable_sum. + * + * Some examples follow, which assume: + * Cluster0 = CPU0-3, Cluster1 = CPU4-7 + * One related thread group A that has tasks A0, A1, A2 + * + * A->cpu_time[X].curr/prev_sum = counters in which cpu execution stats of + * tasks belonging to group A are accumulated when they run on cpu X. + * + * CX->curr/prev_sum = counters in which cpu execution stats of all tasks + * not belonging to group A are accumulated when they run on cpu X + * + * Lets say the stats for window M was as below: + * + * C0->prev_sum = 1ms, A->cpu_time[0].prev_sum = 5ms + * Task A0 ran 5ms on CPU0 + * Task B0 ran 1ms on CPU0 + * + * C1->prev_sum = 5ms, A->cpu_time[1].prev_sum = 6ms + * Task A1 ran 4ms on CPU1 + * Task A2 ran 2ms on CPU1 + * Task B1 ran 5ms on CPU1 + * + * C2->prev_sum = 0ms, A->cpu_time[2].prev_sum = 0 + * CPU2 idle + * + * C3->prev_sum = 0ms, A->cpu_time[3].prev_sum = 0 + * CPU3 idle + * + * In this case, CPU1 was most busy going by just its prev_sum counter. Demand + * from all group A tasks are added to CPU1. IOW, at end of window M, cpu busy + * time reported to governor will be: + * + * + * C0 busy time = 1ms + * C1 busy time = 5 + 5 + 6 = 16ms + * + */ +static __read_mostly unsigned int sched_freq_aggregate = 1; +__read_mostly unsigned int sysctl_sched_freq_aggregate = 1; + +unsigned int __read_mostly sysctl_sched_freq_aggregate_threshold_pct; +static unsigned int __read_mostly sched_freq_aggregate_threshold; + +/* Initial task load. Newly created tasks are assigned this load. 
*/ +unsigned int __read_mostly sched_init_task_load_windows; +unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15; + +unsigned int max_task_load(void) +{ + return sched_ravg_window; +} + +/* A cpu can no longer accommodate more tasks if: + * + * rq->nr_running > sysctl_sched_spill_nr_run || + * rq->hmp_stats.cumulative_runnable_avg > sched_spill_load + */ +unsigned int __read_mostly sysctl_sched_spill_nr_run = 10; + +/* + * Place sync wakee tasks those have less than configured demand to the waker's + * cluster. + */ +unsigned int __read_mostly sched_small_wakee_task_load; +unsigned int __read_mostly sysctl_sched_small_wakee_task_load_pct = 10; + +unsigned int __read_mostly sched_big_waker_task_load; +unsigned int __read_mostly sysctl_sched_big_waker_task_load_pct = 25; + +/* + * CPUs with load greater than the sched_spill_load_threshold are not + * eligible for task placement. When all CPUs in a cluster achieve a + * load higher than this level, tasks becomes eligible for inter + * cluster migration. + */ +unsigned int __read_mostly sched_spill_load; +unsigned int __read_mostly sysctl_sched_spill_load_pct = 100; + +/* + * Prefer the waker CPU for sync wakee task, if the CPU has only 1 runnable + * task. This eliminates the LPM exit latency associated with the idle + * CPUs in the waker cluster. + */ +unsigned int __read_mostly sysctl_sched_prefer_sync_wakee_to_waker; + +/* + * Tasks whose bandwidth consumption on a cpu is more than + * sched_upmigrate are considered "big" tasks. Big tasks will be + * considered for "up" migration, i.e migrating to a cpu with better + * capacity. + */ +unsigned int __read_mostly sched_upmigrate; +unsigned int __read_mostly sysctl_sched_upmigrate_pct = 80; + +/* + * Big tasks, once migrated, will need to drop their bandwidth + * consumption to less than sched_downmigrate before they are "down" + * migrated. 
+ */ +unsigned int __read_mostly sched_downmigrate; +unsigned int __read_mostly sysctl_sched_downmigrate_pct = 60; + +/* + * Task groups whose aggregate demand on a cpu is more than + * sched_group_upmigrate need to be up-migrated if possible. + */ +unsigned int __read_mostly sched_group_upmigrate; +unsigned int __read_mostly sysctl_sched_group_upmigrate_pct = 100; + +/* + * Task groups, once up-migrated, will need to drop their aggregate + * demand to less than sched_group_downmigrate before they are "down" + * migrated. + */ +unsigned int __read_mostly sched_group_downmigrate; +unsigned int __read_mostly sysctl_sched_group_downmigrate_pct = 95; + +/* + * The load scale factor of a CPU gets boosted when its max frequency + * is restricted due to which the tasks are migrating to higher capacity + * CPUs early. The sched_upmigrate threshold is auto-upgraded by + * rq->max_possible_freq/rq->max_freq of a lower capacity CPU. + */ +unsigned int up_down_migrate_scale_factor = 1024; + +/* + * Scheduler selects and places task to its previous CPU if sleep time is + * less than sysctl_sched_select_prev_cpu_us. + */ +unsigned int __read_mostly +sched_short_sleep_task_threshold = 2000 * NSEC_PER_USEC; + +unsigned int __read_mostly sysctl_sched_select_prev_cpu_us = 2000; + +unsigned int __read_mostly +sched_long_cpu_selection_threshold = 100 * NSEC_PER_MSEC; + +unsigned int __read_mostly sysctl_sched_restrict_cluster_spill; + +/* + * Scheduler tries to avoid waking up idle CPUs for tasks running + * in short bursts. If the task average burst is less than + * sysctl_sched_short_burst nanoseconds and it sleeps on an average + * for more than sysctl_sched_short_sleep nanoseconds, then the + * task is eligible for packing. 
+ */ +unsigned int __read_mostly sysctl_sched_short_burst; +unsigned int __read_mostly sysctl_sched_short_sleep = 1 * NSEC_PER_MSEC; + +static void _update_up_down_migrate(unsigned int *up_migrate, + unsigned int *down_migrate, bool is_group) +{ + unsigned int delta; + + if (up_down_migrate_scale_factor == 1024) + return; + + delta = *up_migrate - *down_migrate; + + *up_migrate /= NSEC_PER_USEC; + *up_migrate *= up_down_migrate_scale_factor; + *up_migrate >>= 10; + *up_migrate *= NSEC_PER_USEC; + + if (!is_group) + *up_migrate = min(*up_migrate, sched_ravg_window); + + *down_migrate /= NSEC_PER_USEC; + *down_migrate *= up_down_migrate_scale_factor; + *down_migrate >>= 10; + *down_migrate *= NSEC_PER_USEC; + + *down_migrate = min(*down_migrate, *up_migrate - delta); +} + +static void update_up_down_migrate(void) +{ + unsigned int up_migrate = pct_to_real(sysctl_sched_upmigrate_pct); + unsigned int down_migrate = pct_to_real(sysctl_sched_downmigrate_pct); + + _update_up_down_migrate(&up_migrate, &down_migrate, false); + sched_upmigrate = up_migrate; + sched_downmigrate = down_migrate; + + up_migrate = pct_to_real(sysctl_sched_group_upmigrate_pct); + down_migrate = pct_to_real(sysctl_sched_group_downmigrate_pct); + + _update_up_down_migrate(&up_migrate, &down_migrate, true); + sched_group_upmigrate = up_migrate; + sched_group_downmigrate = down_migrate; +} + +void set_hmp_defaults(void) +{ + sched_spill_load = + pct_to_real(sysctl_sched_spill_load_pct); + + update_up_down_migrate(); + + sched_init_task_load_windows = + div64_u64((u64)sysctl_sched_init_task_load_pct * + (u64)sched_ravg_window, 100); + + sched_short_sleep_task_threshold = sysctl_sched_select_prev_cpu_us * + NSEC_PER_USEC; + + sched_small_wakee_task_load = + div64_u64((u64)sysctl_sched_small_wakee_task_load_pct * + (u64)sched_ravg_window, 100); + + sched_big_waker_task_load = + div64_u64((u64)sysctl_sched_big_waker_task_load_pct * + (u64)sched_ravg_window, 100); + + sched_freq_aggregate_threshold = + 
pct_to_real(sysctl_sched_freq_aggregate_threshold_pct); +} + +u32 sched_get_init_task_load(struct task_struct *p) +{ + return p->init_load_pct; +} + +int sched_set_init_task_load(struct task_struct *p, int init_load_pct) +{ + if (init_load_pct < 0 || init_load_pct > 100) + return -EINVAL; + + p->init_load_pct = init_load_pct; + + return 0; +} + +#ifdef CONFIG_CGROUP_SCHED + +int upmigrate_discouraged(struct task_struct *p) +{ + return task_group(p)->upmigrate_discouraged; +} + +#else + +static inline int upmigrate_discouraged(struct task_struct *p) +{ + return 0; +} + +#endif + +/* Is a task "big" on its current cpu */ +static inline int __is_big_task(struct task_struct *p, u64 scaled_load) +{ + int nice = task_nice(p); + + if (nice > SCHED_UPMIGRATE_MIN_NICE || upmigrate_discouraged(p)) + return 0; + + return scaled_load > sched_upmigrate; +} + +int is_big_task(struct task_struct *p) +{ + return __is_big_task(p, scale_load_to_cpu(task_load(p), task_cpu(p))); +} + +u64 cpu_load(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + + return scale_load_to_cpu(rq->hmp_stats.cumulative_runnable_avg, cpu); +} + +u64 cpu_load_sync(int cpu, int sync) +{ + return scale_load_to_cpu(cpu_cravg_sync(cpu, sync), cpu); +} + +/* + * Task will fit on a cpu if it's bandwidth consumption on that cpu + * will be less than sched_upmigrate. A big task that was previously + * "up" migrated will be considered fitting on "little" cpu if its + * bandwidth consumption on "little" cpu will be less than + * sched_downmigrate. 
This will help avoid frequenty migrations for + * tasks with load close to the upmigrate threshold + */ +int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu, + enum sched_boost_policy boost_policy) +{ + int upmigrate = sched_upmigrate; + + if (cpu_capacity(cpu) == max_capacity) + return 1; + + if (cpu_capacity(task_cpu(p)) > cpu_capacity(cpu)) + upmigrate = sched_downmigrate; + + if (boost_policy != SCHED_BOOST_ON_BIG) { + if (task_nice(p) > SCHED_UPMIGRATE_MIN_NICE || + upmigrate_discouraged(p)) + return 1; + + if (task_load < upmigrate) + return 1; + } else { + if (task_sched_boost(p) || task_load >= upmigrate) + return 0; + + return 1; + } + + return 0; +} + +int task_will_fit(struct task_struct *p, int cpu) +{ + u64 tload = scale_load_to_cpu(task_load(p), cpu); + + return task_load_will_fit(p, tload, cpu, sched_boost_policy()); +} + +static int +group_will_fit(struct sched_cluster *cluster, struct related_thread_group *grp, + u64 demand, bool group_boost) +{ + int cpu = cluster_first_cpu(cluster); + int prev_capacity = 0; + unsigned int threshold = sched_group_upmigrate; + u64 load; + + if (cluster->capacity == max_capacity) + return 1; + + if (group_boost) + return 0; + + if (!demand) + return 1; + + if (grp->preferred_cluster) + prev_capacity = grp->preferred_cluster->capacity; + + if (cluster->capacity < prev_capacity) + threshold = sched_group_downmigrate; + + load = scale_load_to_cpu(demand, cpu); + if (load < threshold) + return 1; + + return 0; +} + +/* + * Return the cost of running task p on CPU cpu. This function + * currently assumes that task p is the only task which will run on + * the CPU. 
+ */ +unsigned int power_cost(int cpu, u64 demand) +{ + int first, mid, last; + struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats(); + struct cpu_pstate_pwr *costs; + struct freq_max_load *max_load; + int total_static_pwr_cost = 0; + struct rq *rq = cpu_rq(cpu); + unsigned int pc; + + if (!per_cpu_info || !per_cpu_info[cpu].ptable) + /* + * When power aware scheduling is not in use, or CPU + * power data is not available, just use the CPU + * capacity as a rough stand-in for real CPU power + * numbers, assuming bigger CPUs are more power + * hungry. + */ + return cpu_max_possible_capacity(cpu); + + rcu_read_lock(); + max_load = rcu_dereference(per_cpu(freq_max_load, cpu)); + if (!max_load) { + pc = cpu_max_possible_capacity(cpu); + goto unlock; + } + + costs = per_cpu_info[cpu].ptable; + + if (demand <= max_load->freqs[0].hdemand) { + pc = costs[0].power; + goto unlock; + } else if (demand > max_load->freqs[max_load->length - 1].hdemand) { + pc = costs[max_load->length - 1].power; + goto unlock; + } + + first = 0; + last = max_load->length - 1; + mid = (last - first) >> 1; + while (1) { + if (demand <= max_load->freqs[mid].hdemand) + last = mid; + else + first = mid; + + if (last - first == 1) + break; + mid = first + ((last - first) >> 1); + } + + pc = costs[last].power; + +unlock: + rcu_read_unlock(); + + if (idle_cpu(cpu) && rq->cstate) { + total_static_pwr_cost += rq->static_cpu_pwr_cost; + if (rq->cluster->dstate) + total_static_pwr_cost += + rq->cluster->static_cluster_pwr_cost; + } + + return pc + total_static_pwr_cost; + +} + +void inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p) +{ + if (sched_disable_window_stats) + return; + + if (is_big_task(p)) + stats->nr_big_tasks++; +} + +void dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p) +{ + if (sched_disable_window_stats) + return; + + if (is_big_task(p)) + stats->nr_big_tasks--; + + BUG_ON(stats->nr_big_tasks < 0); +} + +void inc_rq_hmp_stats(struct rq *rq, 
struct task_struct *p, int change_cra) +{ + inc_nr_big_task(&rq->hmp_stats, p); + if (change_cra) + inc_cumulative_runnable_avg(&rq->hmp_stats, p); +} + +void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) +{ + dec_nr_big_task(&rq->hmp_stats, p); + if (change_cra) + dec_cumulative_runnable_avg(&rq->hmp_stats, p); +} + +void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra) +{ + stats->nr_big_tasks = 0; + if (reset_cra) { + stats->cumulative_runnable_avg = 0; + stats->pred_demands_sum = 0; + } +} + +int preferred_cluster(struct sched_cluster *cluster, struct task_struct *p) +{ + struct related_thread_group *grp; + int rc = 1; + + rcu_read_lock(); + + grp = task_related_thread_group(p); + if (grp) + rc = (grp->preferred_cluster == cluster); + + rcu_read_unlock(); + return rc; +} + +struct sched_cluster *rq_cluster(struct rq *rq) +{ + return rq->cluster; +} + +/* + * reset_cpu_hmp_stats - reset HMP stats for a cpu + * nr_big_tasks + * cumulative_runnable_avg (iff reset_cra is true) + */ +void reset_cpu_hmp_stats(int cpu, int reset_cra) +{ + reset_cfs_rq_hmp_stats(cpu, reset_cra); + reset_hmp_stats(&cpu_rq(cpu)->hmp_stats, reset_cra); +} + +void fixup_nr_big_tasks(struct hmp_sched_stats *stats, + struct task_struct *p, s64 delta) +{ + u64 new_task_load; + u64 old_task_load; + + if (sched_disable_window_stats) + return; + + old_task_load = scale_load_to_cpu(task_load(p), task_cpu(p)); + new_task_load = scale_load_to_cpu(delta + task_load(p), task_cpu(p)); + + if (__is_big_task(p, old_task_load) && !__is_big_task(p, new_task_load)) + stats->nr_big_tasks--; + else if (!__is_big_task(p, old_task_load) && + __is_big_task(p, new_task_load)) + stats->nr_big_tasks++; + + BUG_ON(stats->nr_big_tasks < 0); +} + +/* + * Walk runqueue of cpu and re-initialize 'nr_big_tasks' counters. 
+ */ +static void update_nr_big_tasks(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + struct task_struct *p; + + /* Do not reset cumulative_runnable_avg */ + reset_cpu_hmp_stats(cpu, 0); + + list_for_each_entry(p, &rq->cfs_tasks, se.group_node) + _inc_hmp_sched_stats_fair(rq, p, 0); +} + +/* Disable interrupts and grab runqueue lock of all cpus listed in @cpus */ +void pre_big_task_count_change(const struct cpumask *cpus) +{ + int i; + + local_irq_disable(); + + for_each_cpu(i, cpus) + raw_spin_lock(&cpu_rq(i)->lock); +} + +/* + * Reinitialize 'nr_big_tasks' counters on all affected cpus + */ +void post_big_task_count_change(const struct cpumask *cpus) +{ + int i; + + /* Assumes local_irq_disable() keeps online cpumap stable */ + for_each_cpu(i, cpus) + update_nr_big_tasks(i); + + for_each_cpu(i, cpus) + raw_spin_unlock(&cpu_rq(i)->lock); + + local_irq_enable(); +} + +DEFINE_MUTEX(policy_mutex); + +unsigned int update_freq_aggregate_threshold(unsigned int threshold) +{ + unsigned int old_threshold; + + mutex_lock(&policy_mutex); + + old_threshold = sysctl_sched_freq_aggregate_threshold_pct; + + sysctl_sched_freq_aggregate_threshold_pct = threshold; + sched_freq_aggregate_threshold = + pct_to_real(sysctl_sched_freq_aggregate_threshold_pct); + + mutex_unlock(&policy_mutex); + + return old_threshold; +} + +static inline int invalid_value_freq_input(unsigned int *data) +{ + if (data == &sysctl_sched_freq_aggregate) + return !(*data == 0 || *data == 1); + + return 0; +} + +static inline int invalid_value(unsigned int *data) +{ + unsigned int val = *data; + + if (data == &sysctl_sched_ravg_hist_size) + return (val < 2 || val > RAVG_HIST_SIZE_MAX); + + if (data == &sysctl_sched_window_stats_policy) + return val >= WINDOW_STATS_INVALID_POLICY; + + return invalid_value_freq_input(data); +} + +/* + * Handle "atomic" update of sysctl_sched_window_stats_policy, + * sysctl_sched_ravg_hist_size variables. 
+ */ +int sched_window_update_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + unsigned int *data = (unsigned int *)table->data; + unsigned int old_val; + + mutex_lock(&policy_mutex); + + old_val = *data; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (ret || !write || (write && (old_val == *data))) + goto done; + + if (invalid_value(data)) { + *data = old_val; + ret = -EINVAL; + goto done; + } + + reset_all_window_stats(0, 0); + +done: + mutex_unlock(&policy_mutex); + + return ret; +} + +/* + * Convert percentage value into absolute form. This will avoid div() operation + * in fast path, to convert task load in percentage scale. + */ +int sched_hmp_proc_update_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + unsigned int old_val; + unsigned int *data = (unsigned int *)table->data; + int update_task_count = 0; + + /* + * The policy mutex is acquired with cpu_hotplug.lock + * held from cpu_up()->cpufreq_governor_interactive()-> + * sched_set_window(). So enforce the same order here. + */ + if (write && (data == &sysctl_sched_upmigrate_pct)) { + update_task_count = 1; + get_online_cpus(); + } + + mutex_lock(&policy_mutex); + + old_val = *data; + + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + + if (ret || !write) + goto done; + + if (write && (old_val == *data)) + goto done; + + if (sysctl_sched_downmigrate_pct > sysctl_sched_upmigrate_pct || + sysctl_sched_group_downmigrate_pct > + sysctl_sched_group_upmigrate_pct) { + *data = old_val; + ret = -EINVAL; + goto done; + } + + /* + * Big task tunable change will need to re-classify tasks on + * runqueue as big and set their counters appropriately. + * sysctl interface affects secondary variables (*_pct), which is then + * "atomically" carried over to the primary variables. 
Atomic change + * includes taking runqueue lock of all online cpus and re-initiatizing + * their big counter values based on changed criteria. + */ + if (update_task_count) + pre_big_task_count_change(cpu_online_mask); + + set_hmp_defaults(); + + if (update_task_count) + post_big_task_count_change(cpu_online_mask); + +done: + mutex_unlock(&policy_mutex); + if (update_task_count) + put_online_cpus(); + return ret; +} + +inline int nr_big_tasks(struct rq *rq) +{ + return rq->hmp_stats.nr_big_tasks; +} + +unsigned int cpu_temp(int cpu) +{ + struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats(); + + if (per_cpu_info) + return per_cpu_info[cpu].temp; + else + return 0; +} + +/* + * kfree() may wakeup kswapd. So this function should NOT be called + * with any CPU's rq->lock acquired. + */ +void free_task_load_ptrs(struct task_struct *p) +{ + kfree(p->ravg.curr_window_cpu); + kfree(p->ravg.prev_window_cpu); + + /* + * update_task_ravg() can be called for exiting tasks. While the + * function itself ensures correct behavior, the corresponding + * trace event requires that these pointers be NULL. + */ + p->ravg.curr_window_cpu = NULL; + p->ravg.prev_window_cpu = NULL; +} + +void init_new_task_load(struct task_struct *p) +{ + int i; + u32 init_load_windows = sched_init_task_load_windows; + u32 init_load_pct = current->init_load_pct; + + p->init_load_pct = 0; + rcu_assign_pointer(p->grp, NULL); + INIT_LIST_HEAD(&p->grp_list); + memset(&p->ravg, 0, sizeof(struct ravg)); + p->cpu_cycles = 0; + p->ravg.curr_burst = 0; + /* + * Initialize the avg_burst to twice the threshold, so that + * a task would not be classified as short burst right away + * after fork. It takes at least 6 sleep-wakeup cycles for + * the avg_burst to go below the threshold. 
+ */ + p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst; + p->ravg.avg_sleep_time = 0; + + p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL); + p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL); + + /* Don't have much choice. CPU frequency would be bogus */ + BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu); + + if (init_load_pct) + init_load_windows = div64_u64((u64)init_load_pct * + (u64)sched_ravg_window, 100); + + p->ravg.demand = init_load_windows; + p->ravg.pred_demand = 0; + for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i) + p->ravg.sum_history[i] = init_load_windows; +} + +/* Return task demand in percentage scale */ +unsigned int pct_task_load(struct task_struct *p) +{ + unsigned int load; + + load = div64_u64((u64)task_load(p) * 100, (u64)max_task_load()); + + return load; +} + +/* + * Return total number of tasks "eligible" to run on highest capacity cpu + * + * This is simply nr_big_tasks for cpus which are not of max_capacity and + * nr_running for cpus of max_capacity + */ +unsigned int nr_eligible_big_tasks(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + int nr_big = rq->hmp_stats.nr_big_tasks; + int nr = rq->nr_running; + + if (!is_max_capacity_cpu(cpu)) + return nr_big; + + return nr; +} + +static inline int exiting_task(struct task_struct *p) +{ + return (p->ravg.sum_history[0] == EXITING_TASK_MARKER); +} + +static int __init set_sched_ravg_window(char *str) +{ + unsigned int window_size; + + get_option(&str, &window_size); + + if (window_size < MIN_SCHED_RAVG_WINDOW || + window_size > MAX_SCHED_RAVG_WINDOW) { + WARN_ON(1); + return -EINVAL; + } + + sched_ravg_window = window_size; + return 0; +} + +early_param("sched_ravg_window", set_sched_ravg_window); + +static inline void +update_window_start(struct rq *rq, u64 wallclock) +{ + s64 delta; + int nr_windows; + + delta = wallclock - rq->window_start; + BUG_ON(delta < 0); + if (delta < sched_ravg_window) + return; + + nr_windows = div64_u64(delta, 
sched_ravg_window); + rq->window_start += (u64)nr_windows * (u64)sched_ravg_window; +} + +#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y) + +static inline u64 scale_exec_time(u64 delta, struct rq *rq) +{ + u32 freq; + + freq = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time); + + /* + * For some reason, current frequency estimation + * can be far bigger than max available frequency. + * + * TODO: need to be investigated. As for now, take + * min as a workaround. + */ + freq = min(freq, max_possible_freq); + + delta = DIV64_U64_ROUNDUP(delta * freq, max_possible_freq); + delta *= rq->cluster->exec_scale_factor; + delta >>= 10; + + return delta; +} + +static inline int cpu_is_waiting_on_io(struct rq *rq) +{ + if (!sched_io_is_busy) + return 0; + + return atomic_read(&rq->nr_iowait); +} + +/* Does freq_required sufficiently exceed or fall behind cur_freq? */ +static inline int +nearly_same_freq(unsigned int cur_freq, unsigned int freq_required) +{ + int delta = freq_required - cur_freq; + + if (freq_required > cur_freq) + return delta < sysctl_sched_freq_inc_notify; + + delta = -delta; + + return delta < sysctl_sched_freq_dec_notify; +} + +/* Convert busy time to frequency equivalent */ +static inline unsigned int load_to_freq(struct rq *rq, u64 load) +{ + unsigned int freq; + + load = scale_load_to_cpu(load, cpu_of(rq)); + load *= 128; + load = div64_u64(load, max_task_load()); + + freq = load * cpu_max_possible_freq(cpu_of(rq)); + freq /= 128; + + return freq; +} + +/* + * Return load from all related groups in given frequency domain. 
+ */ +static void group_load_in_freq_domain(struct cpumask *cpus, + u64 *grp_load, u64 *new_grp_load) +{ + int j; + + for_each_cpu(j, cpus) { + struct rq *rq = cpu_rq(j); + + *grp_load += rq->grp_time.prev_runnable_sum; + *new_grp_load += rq->grp_time.nt_prev_runnable_sum; + } +} + +static inline u64 freq_policy_load(struct rq *rq, u64 load); +static inline void commit_prev_group_run_sum(struct rq *rq); +static inline u64 get_prev_group_run_sum(struct rq *rq); +/* + * Should scheduler alert governor for changing frequency? + * + * @check_pred - evaluate frequency based on the predictive demand + * @check_groups - add load from all related groups on given cpu + * + * check_groups is set to 1 if a "related" task movement/wakeup is triggering + * the notification check. To avoid "re-aggregation" of demand in such cases, + * we check whether the migrated/woken tasks demand (along with demand from + * existing tasks on the cpu) can be met on target cpu + * + */ + +static int send_notification(struct rq *rq, int check_pred, int check_groups) +{ + unsigned int cur_freq, freq_required; + int rc = 0; + u64 new_load, val = 0; + u32 prev_run_sum, group_run_sum; + + if (check_pred) { + u64 prev = rq->old_busy_time; + u64 predicted = rq->hmp_stats.pred_demands_sum; + + if (rq->cluster->cur_freq == cpu_max_freq(cpu_of(rq))) + return 0; + + prev = max(prev, rq->old_estimated_time); + if (prev > predicted) + return 0; + + cur_freq = load_to_freq(rq, prev); + freq_required = load_to_freq(rq, predicted); + + if (freq_required < cur_freq + sysctl_sched_pred_alert_freq) + return 0; + } else { + val = get_prev_group_run_sum(rq); + group_run_sum = (u32) (val >> 32); + prev_run_sum = (u32) val; + + if (check_groups) + /* + * prev_run_sum and group_run_sum are synced + */ + new_load = prev_run_sum + group_run_sum; + else + new_load = prev_run_sum; + + new_load = freq_policy_load(rq, new_load); + cur_freq = load_to_freq(rq, rq->old_busy_time); + freq_required = load_to_freq(rq, new_load); 
+ + if (nearly_same_freq(cur_freq, freq_required)) + return 0; + } + + if (!atomic_cmpxchg(&rq->cluster->notifier_sent, 0, 1)) { + rc = 1; + trace_sched_freq_alert(cpu_of(rq), check_pred, check_groups, rq, + new_load); + } + + return rc; +} + +/* Alert governor if there is a need to change frequency */ +void check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) +{ + if (send_notification(rq, check_pred, check_groups)) { + atomic_notifier_call_chain( + &load_alert_notifier_head, 0, + (void *)(long) cpu_of(rq)); + } +} + +void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead, + struct task_struct *p) +{ + bool check_groups; + + rcu_read_lock(); + check_groups = task_in_related_thread_group(p); + rcu_read_unlock(); + + if (!same_freq_domain(src_cpu, dest_cpu)) { + if (!src_cpu_dead) + check_for_freq_change(cpu_rq(src_cpu), false, + check_groups); + check_for_freq_change(cpu_rq(dest_cpu), false, check_groups); + } else { + check_for_freq_change(cpu_rq(dest_cpu), true, check_groups); + } +} + +static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p, + u64 irqtime, int event) +{ + if (is_idle_task(p)) { + /* TASK_WAKE && TASK_MIGRATE is not possible on idle task! */ + if (event == PICK_NEXT_TASK) + return 0; + + /* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */ + return irqtime || cpu_is_waiting_on_io(rq); + } + + if (event == TASK_WAKE) + return 0; + + if (event == PUT_PREV_TASK || event == IRQ_UPDATE) + return 1; + + /* + * TASK_UPDATE can be called on sleeping task, when its moved between + * related groups + */ + if (event == TASK_UPDATE) { + if (rq->curr == p) + return 1; + + return p->on_rq ? 
SCHED_FREQ_ACCOUNT_WAIT_TIME : 0; + } + + /* TASK_MIGRATE, PICK_NEXT_TASK left */ + return SCHED_FREQ_ACCOUNT_WAIT_TIME; +} + +static inline bool is_new_task(struct task_struct *p) +{ + return p->ravg.active_windows < SCHED_NEW_TASK_WINDOWS; +} + +#define INC_STEP 8 +#define DEC_STEP 2 +#define CONSISTENT_THRES 16 +#define INC_STEP_BIG 16 +/* + * bucket_increase - update the count of all buckets + * + * @buckets: array of buckets tracking busy time of a task + * @idx: the index of bucket to be incremented + * + * Each time a complete window finishes, count of bucket that runtime + * falls in (@idx) is incremented. Counts of all other buckets are + * decayed. The rate of increase and decay could be different based + * on current count in the bucket. + */ +static inline void bucket_increase(u8 *buckets, int idx) +{ + int i, step; + + for (i = 0; i < NUM_BUSY_BUCKETS; i++) { + if (idx != i) { + if (buckets[i] > DEC_STEP) + buckets[i] -= DEC_STEP; + else + buckets[i] = 0; + } else { + step = buckets[i] >= CONSISTENT_THRES ? + INC_STEP_BIG : INC_STEP; + if (buckets[i] > U8_MAX - step) + buckets[i] = U8_MAX; + else + buckets[i] += step; + } + } +} + +static inline int busy_to_bucket(u32 normalized_rt) +{ + int bidx; + + bidx = mult_frac(normalized_rt, NUM_BUSY_BUCKETS, max_task_load()); + bidx = min(bidx, NUM_BUSY_BUCKETS - 1); + + /* + * Combine lowest two buckets. The lowest frequency falls into + * 2nd bucket and thus keep predicting lowest bucket is not + * useful. + */ + if (!bidx) + bidx++; + + return bidx; +} + +static inline u64 +scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq) +{ + return div64_u64(load * (u64)src_freq, (u64)dst_freq); +} + +/* + * get_pred_busy - calculate predicted demand for a task on runqueue + * + * @rq: runqueue of task p + * @p: task whose prediction is being updated + * @start: starting bucket. returned prediction should not be lower than + * this bucket. + * @runtime: runtime of the task. 
returned prediction should not be lower + * than this runtime. + * Note: @start can be derived from @runtime. It's passed in only to + * avoid duplicated calculation in some cases. + * + * A new predicted busy time is returned for task @p based on @runtime + * passed in. The function searches through buckets that represent busy + * time equal to or bigger than @runtime and attempts to find the bucket to + * to use for prediction. Once found, it searches through historical busy + * time and returns the latest that falls into the bucket. If no such busy + * time exists, it returns the medium of that bucket. + */ +static u32 get_pred_busy(struct rq *rq, struct task_struct *p, + int start, u32 runtime) +{ + int i; + u8 *buckets = p->ravg.busy_buckets; + u32 *hist = p->ravg.sum_history; + u32 dmin, dmax; + u64 cur_freq_runtime = 0; + int first = NUM_BUSY_BUCKETS, final; + u32 ret = runtime; + + /* skip prediction for new tasks due to lack of history */ + if (unlikely(is_new_task(p))) + goto out; + + /* find minimal bucket index to pick */ + for (i = start; i < NUM_BUSY_BUCKETS; i++) { + if (buckets[i]) { + first = i; + break; + } + } + /* if no higher buckets are filled, predict runtime */ + if (first >= NUM_BUSY_BUCKETS) + goto out; + + /* compute the bucket for prediction */ + final = first; + + /* determine demand range for the predicted bucket */ + if (final < 2) { + /* lowest two buckets are combined */ + dmin = 0; + final = 1; + } else { + dmin = mult_frac(final, max_task_load(), NUM_BUSY_BUCKETS); + } + dmax = mult_frac(final + 1, max_task_load(), NUM_BUSY_BUCKETS); + + /* + * search through runtime history and return first runtime that falls + * into the range of predicted bucket. 
+ */ + for (i = 0; i < sched_ravg_hist_size; i++) { + if (hist[i] >= dmin && hist[i] < dmax) { + ret = hist[i]; + break; + } + } + /* no historical runtime within bucket found, use average of the bin */ + if (ret < dmin) + ret = (dmin + dmax) / 2; + /* + * when updating in middle of a window, runtime could be higher + * than all recorded history. Always predict at least runtime. + */ + ret = max(runtime, ret); +out: + trace_sched_update_pred_demand(rq, p, runtime, + mult_frac((unsigned int)cur_freq_runtime, 100, + sched_ravg_window), ret); + return ret; +} + +static inline u32 calc_pred_demand(struct rq *rq, struct task_struct *p) +{ + if (p->ravg.pred_demand >= p->ravg.curr_window) + return p->ravg.pred_demand; + + return get_pred_busy(rq, p, busy_to_bucket(p->ravg.curr_window), + p->ravg.curr_window); +} + +/* + * predictive demand of a task is calculated at the window roll-over. + * if the task current window busy time exceeds the predicted + * demand, update it here to reflect the task needs. 
+ */ +void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event) +{ + u32 new, old; + + if (is_idle_task(p) || exiting_task(p)) + return; + + if (event != PUT_PREV_TASK && event != TASK_UPDATE && + (!SCHED_FREQ_ACCOUNT_WAIT_TIME || + (event != TASK_MIGRATE && + event != PICK_NEXT_TASK))) + return; + + /* + * TASK_UPDATE can be called on sleeping task, when its moved between + * related groups + */ + if (event == TASK_UPDATE) { + if (!p->on_rq && !SCHED_FREQ_ACCOUNT_WAIT_TIME) + return; + } + + new = calc_pred_demand(rq, p); + old = p->ravg.pred_demand; + + if (old >= new) + return; + + if (task_on_rq_queued(p) && (!task_has_dl_policy(p) || + !p->dl.dl_throttled)) + p->sched_class->fixup_hmp_sched_stats(rq, p, + p->ravg.demand, + new); + + p->ravg.pred_demand = new; +} + +void clear_top_tasks_bitmap(unsigned long *bitmap) +{ + memset(bitmap, 0, top_tasks_bitmap_size); + __set_bit(NUM_LOAD_INDICES, bitmap); +} + +/* + * Special case the last index and provide a fast path for index = 0. 
+ */ +static u32 top_task_load(struct rq *rq) +{ + int index = rq->prev_top; + u8 prev = 1 - rq->curr_table; + u32 sched_granule_load; + u32 ret_val = 0; + + sched_granule_load = READ_ONCE(sched_load_granule); + + if (!index) { + int msb = NUM_LOAD_INDICES - 1; + + if (test_bit(msb, rq->top_tasks_bitmap[prev])) + ret_val = sched_granule_load; + } else if (index == NUM_LOAD_INDICES - 1) { + ret_val = sched_ravg_window; + } else { + ret_val = (index + 1) * sched_granule_load; + } + + return ret_val; +} + +static u32 load_to_index(u32 load) +{ + u32 index = load / sched_load_granule; + + return min(index, (u32)(NUM_LOAD_INDICES - 1)); +} + +static void update_top_tasks(struct task_struct *p, struct rq *rq, + u32 old_curr_window, int new_window, bool full_window) +{ + u8 curr = rq->curr_table; + u8 prev = 1 - curr; + u8 *curr_table = rq->top_tasks[curr]; + u8 *prev_table = rq->top_tasks[prev]; + int old_index, new_index, update_index; + u32 curr_window = p->ravg.curr_window; + u32 prev_window = p->ravg.prev_window; + bool zero_index_update; + + if (old_curr_window == curr_window && !new_window) + return; + + old_index = load_to_index(old_curr_window); + new_index = load_to_index(curr_window); + + if (!new_window) { + zero_index_update = !old_curr_window && curr_window; + if (old_index != new_index || zero_index_update) { + if (old_curr_window) + curr_table[old_index] -= 1; + if (curr_window) + curr_table[new_index] += 1; + if (new_index > rq->curr_top) + rq->curr_top = new_index; + } + + if (!curr_table[old_index]) + __clear_bit(NUM_LOAD_INDICES - old_index - 1, + rq->top_tasks_bitmap[curr]); + + if (curr_table[new_index] == 1) + __set_bit(NUM_LOAD_INDICES - new_index - 1, + rq->top_tasks_bitmap[curr]); + + return; + } + + /* + * The window has rolled over for this task. By the time we get + * here, curr/prev swaps would has already occurred. So we need + * to use prev_window for the new index. 
+ */ + update_index = load_to_index(prev_window); + + if (full_window) { + /* + * Two cases here. Either 'p' ran for the entire window or + * it didn't run at all. In either case there is no entry + * in the prev table. If 'p' ran the entire window, we just + * need to create a new entry in the prev table. In this case + * update_index will be correspond to sched_ravg_window + * so we can unconditionally update the top index. + */ + if (prev_window) { + prev_table[update_index] += 1; + rq->prev_top = update_index; + } + + if (prev_table[update_index] == 1) + __set_bit(NUM_LOAD_INDICES - update_index - 1, + rq->top_tasks_bitmap[prev]); + } else { + zero_index_update = !old_curr_window && prev_window; + if (old_index != update_index || zero_index_update) { + if (old_curr_window) + prev_table[old_index] -= 1; + + prev_table[update_index] += 1; + + if (update_index > rq->prev_top) + rq->prev_top = update_index; + + if (!prev_table[old_index]) + __clear_bit(NUM_LOAD_INDICES - old_index - 1, + rq->top_tasks_bitmap[prev]); + + if (prev_table[update_index] == 1) + __set_bit(NUM_LOAD_INDICES - update_index - 1, + rq->top_tasks_bitmap[prev]); + } + } + + if (curr_window) { + curr_table[new_index] += 1; + + if (new_index > rq->curr_top) + rq->curr_top = new_index; + + if (curr_table[new_index] == 1) + __set_bit(NUM_LOAD_INDICES - new_index - 1, + rq->top_tasks_bitmap[curr]); + } +} + +static inline void clear_top_tasks_table(u8 *table) +{ + memset(table, 0, NUM_LOAD_INDICES * sizeof(u8)); +} + +static void rollover_top_tasks(struct rq *rq, bool full_window) +{ + u8 curr_table = rq->curr_table; + u8 prev_table = 1 - curr_table; + int curr_top = rq->curr_top; + + clear_top_tasks_table(rq->top_tasks[prev_table]); + clear_top_tasks_bitmap(rq->top_tasks_bitmap[prev_table]); + + if (full_window) { + curr_top = 0; + clear_top_tasks_table(rq->top_tasks[curr_table]); + clear_top_tasks_bitmap( + rq->top_tasks_bitmap[curr_table]); + } + + rq->curr_table = prev_table; + rq->prev_top = 
curr_top; + rq->curr_top = 0; +} + +static u32 empty_windows[NR_CPUS]; + +static void rollover_task_window(struct task_struct *p, bool full_window) +{ + u32 *curr_cpu_windows = empty_windows; + u32 curr_window; + int i; + + /* Rollover the sum */ + curr_window = 0; + + if (!full_window) { + curr_window = p->ravg.curr_window; + curr_cpu_windows = p->ravg.curr_window_cpu; + } + + p->ravg.prev_window = curr_window; + p->ravg.curr_window = 0; + + /* Roll over individual CPU contributions */ + for (i = 0; i < nr_cpu_ids; i++) { + p->ravg.prev_window_cpu[i] = curr_cpu_windows[i]; + p->ravg.curr_window_cpu[i] = 0; + } +} + +static void rollover_cpu_window(struct rq *rq, bool full_window) +{ + u64 curr_sum = rq->curr_runnable_sum; + u64 nt_curr_sum = rq->nt_curr_runnable_sum; + u64 grp_curr_sum = rq->grp_time.curr_runnable_sum; + u64 grp_nt_curr_sum = rq->grp_time.nt_curr_runnable_sum; + + if (unlikely(full_window)) { + curr_sum = 0; + nt_curr_sum = 0; + grp_curr_sum = 0; + grp_nt_curr_sum = 0; + } + + rq->prev_runnable_sum = curr_sum; + rq->nt_prev_runnable_sum = nt_curr_sum; + rq->grp_time.prev_runnable_sum = grp_curr_sum; + rq->grp_time.nt_prev_runnable_sum = grp_nt_curr_sum; + + rq->curr_runnable_sum = 0; + rq->nt_curr_runnable_sum = 0; + rq->grp_time.curr_runnable_sum = 0; + rq->grp_time.nt_curr_runnable_sum = 0; +} + +static inline void +commit_prev_group_run_sum(struct rq *rq) +{ + u64 val; + + val = rq->grp_time.prev_runnable_sum; + val = (val << 32) | rq->prev_runnable_sum; + WRITE_ONCE(per_cpu(prev_group_runnable_sum, cpu_of(rq)), val); +} + +static inline u64 +get_prev_group_run_sum(struct rq *rq) +{ + return READ_ONCE(per_cpu(prev_group_runnable_sum, cpu_of(rq))); +} + +/* + * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum) + */ +static void update_cpu_busy_time(struct task_struct *p, struct rq *rq, + int event, u64 wallclock, u64 irqtime) +{ + int new_window, full_window = 0; + int p_is_curr_task = (p == rq->curr); + u64 mark_start 
= p->ravg.mark_start; + u64 window_start = rq->window_start; + u32 window_size = sched_ravg_window; + u64 delta; + u64 *curr_runnable_sum = &rq->curr_runnable_sum; + u64 *prev_runnable_sum = &rq->prev_runnable_sum; + u64 *nt_curr_runnable_sum = &rq->nt_curr_runnable_sum; + u64 *nt_prev_runnable_sum = &rq->nt_prev_runnable_sum; + bool new_task; + struct related_thread_group *grp; + int cpu = rq->cpu; + u32 old_curr_window = p->ravg.curr_window; + + new_window = mark_start < window_start; + if (new_window) { + full_window = (window_start - mark_start) >= window_size; + if (p->ravg.active_windows < USHRT_MAX) + p->ravg.active_windows++; + } + + new_task = is_new_task(p); + + /* + * Handle per-task window rollover. We don't care about the idle + * task or exiting tasks. + */ + if (!is_idle_task(p) && !exiting_task(p)) { + if (new_window) + rollover_task_window(p, full_window); + } + + if (p_is_curr_task && new_window) { + rollover_cpu_window(rq, full_window); + rollover_top_tasks(rq, full_window); + } + + if (!account_busy_for_cpu_time(rq, p, irqtime, event)) + goto done; + + grp = p->grp; + if (grp && sched_freq_aggregate) { + struct group_cpu_time *cpu_time = &rq->grp_time; + + curr_runnable_sum = &cpu_time->curr_runnable_sum; + prev_runnable_sum = &cpu_time->prev_runnable_sum; + + nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum; + nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum; + } + + if (!new_window) { + /* + * account_busy_for_cpu_time() = 1 so busy time needs + * to be accounted to the current window. No rollover + * since we didn't start a new window. An example of this is + * when a task starts execution and then sleeps within the + * same window. 
+ */ + + if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) + delta = wallclock - mark_start; + else + delta = irqtime; + delta = scale_exec_time(delta, rq); + *curr_runnable_sum += delta; + if (new_task) + *nt_curr_runnable_sum += delta; + + if (!is_idle_task(p) && !exiting_task(p)) { + p->ravg.curr_window += delta; + p->ravg.curr_window_cpu[cpu] += delta; + } + + goto done; + } + + if (!p_is_curr_task) { + /* + * account_busy_for_cpu_time() = 1 so busy time needs + * to be accounted to the current window. A new window + * has also started, but p is not the current task, so the + * window is not rolled over - just split up and account + * as necessary into curr and prev. The window is only + * rolled over when a new window is processed for the current + * task. + * + * Irqtime can't be accounted by a task that isn't the + * currently running task. + */ + + if (!full_window) { + /* + * A full window hasn't elapsed, account partial + * contribution to previous completed window. + */ + delta = scale_exec_time(window_start - mark_start, rq); + if (!exiting_task(p)) { + p->ravg.prev_window += delta; + p->ravg.prev_window_cpu[cpu] += delta; + } + } else { + /* + * Since at least one full window has elapsed, + * the contribution to the previous window is the + * full window (window_size). + */ + delta = scale_exec_time(window_size, rq); + if (!exiting_task(p)) { + p->ravg.prev_window = delta; + p->ravg.prev_window_cpu[cpu] = delta; + } + } + + *prev_runnable_sum += delta; + if (new_task) + *nt_prev_runnable_sum += delta; + + /* Account piece of busy time in the current window. 
*/ + delta = scale_exec_time(wallclock - window_start, rq); + *curr_runnable_sum += delta; + if (new_task) + *nt_curr_runnable_sum += delta; + + if (!exiting_task(p)) { + p->ravg.curr_window = delta; + p->ravg.curr_window_cpu[cpu] = delta; + } + + goto done; + } + + if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) { + /* + * account_busy_for_cpu_time() = 1 so busy time needs + * to be accounted to the current window. A new window + * has started and p is the current task so rollover is + * needed. If any of these three above conditions are true + * then this busy time can't be accounted as irqtime. + * + * Busy time for the idle task or exiting tasks need not + * be accounted. + * + * An example of this would be a task that starts execution + * and then sleeps once a new window has begun. + */ + + if (!full_window) { + /* + * A full window hasn't elapsed, account partial + * contribution to previous completed window. + */ + delta = scale_exec_time(window_start - mark_start, rq); + if (!is_idle_task(p) && !exiting_task(p)) { + p->ravg.prev_window += delta; + p->ravg.prev_window_cpu[cpu] += delta; + } + } else { + /* + * Since at least one full window has elapsed, + * the contribution to the previous window is the + * full window (window_size). + */ + delta = scale_exec_time(window_size, rq); + if (!is_idle_task(p) && !exiting_task(p)) { + p->ravg.prev_window = delta; + p->ravg.prev_window_cpu[cpu] = delta; + } + } + + /* + * Rollover is done here by overwriting the values in + * prev_runnable_sum and curr_runnable_sum. + */ + *prev_runnable_sum += delta; + if (new_task) + *nt_prev_runnable_sum += delta; + + /* Account piece of busy time in the current window. 
*/ + delta = scale_exec_time(wallclock - window_start, rq); + *curr_runnable_sum += delta; + if (new_task) + *nt_curr_runnable_sum += delta; + + if (!is_idle_task(p) && !exiting_task(p)) { + p->ravg.curr_window = delta; + p->ravg.curr_window_cpu[cpu] = delta; + } + + goto done; + } + + if (irqtime) { + /* + * account_busy_for_cpu_time() = 1 so busy time needs + * to be accounted to the current window. A new window + * has started and p is the current task so rollover is + * needed. The current task must be the idle task because + * irqtime is not accounted for any other task. + * + * Irqtime will be accounted each time we process IRQ activity + * after a period of idleness, so we know the IRQ busy time + * started at wallclock - irqtime. + */ + + BUG_ON(!is_idle_task(p)); + mark_start = wallclock - irqtime; + + /* + * Roll window over. If IRQ busy time was just in the current + * window then that is all that need be accounted. + */ + if (mark_start > window_start) { + *curr_runnable_sum = scale_exec_time(irqtime, rq); + goto done; + } + + /* + * The IRQ busy time spanned multiple windows. Process the + * busy time preceding the current window start first. + */ + delta = window_start - mark_start; + if (delta > window_size) + delta = window_size; + delta = scale_exec_time(delta, rq); + *prev_runnable_sum += delta; + + /* Process the remaining IRQ busy time in the current window. 
*/ + delta = wallclock - window_start; + rq->curr_runnable_sum = scale_exec_time(delta, rq); + } + +done: + commit_prev_group_run_sum(rq); + + if (!is_idle_task(p) && !exiting_task(p)) + update_top_tasks(p, rq, old_curr_window, + new_window, full_window); +} + +static inline u32 predict_and_update_buckets(struct rq *rq, + struct task_struct *p, u32 runtime) { + + int bidx; + u32 pred_demand; + + bidx = busy_to_bucket(runtime); + pred_demand = get_pred_busy(rq, p, bidx, runtime); + bucket_increase(p->ravg.busy_buckets, bidx); + + return pred_demand; +} + +#define THRESH_CC_UPDATE (2 * NSEC_PER_USEC) + +/* + * Assumes rq_lock is held and wallclock was recorded in the same critical + * section as this function's invocation. + */ +static inline u64 read_cycle_counter(int cpu, u64 wallclock) +{ + struct sched_cluster *cluster = cpu_rq(cpu)->cluster; + u64 delta; + + if (unlikely(!cluster)) + return cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu); + + /* + * Why don't we need locking here? Let's say that delta is negative + * because some other CPU happened to update last_cc_update with a + * more recent timestamp. We simply read the conter again in that case + * with no harmful side effects. This can happen if there is an FIQ + * between when we read the wallclock and when we use it here. 
+ */ + delta = wallclock - atomic64_read(&cluster->last_cc_update); + if (delta > THRESH_CC_UPDATE) { + atomic64_set(&cluster->cycles, + cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu)); + atomic64_set(&cluster->last_cc_update, wallclock); + } + + return atomic64_read(&cluster->cycles); +} + +static void update_task_cpu_cycles(struct task_struct *p, int cpu, + u64 wallclock) +{ + if (use_cycle_counter) + p->cpu_cycles = read_cycle_counter(cpu, wallclock); +} + +static void +update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event, + u64 wallclock, u64 irqtime) +{ + u64 cur_cycles; + int cpu = cpu_of(rq); + + lockdep_assert_held(&rq->lock); + + if (!use_cycle_counter) { + rq->cc.cycles = cpu_cur_freq(cpu); + rq->cc.time = 1; + return; + } + + cur_cycles = read_cycle_counter(cpu, wallclock); + + /* + * If current task is idle task and irqtime == 0 CPU was + * indeed idle and probably its cycle counter was not + * increasing. We still need estimatied CPU frequency + * for IO wait time accounting. Use the previously + * calculated frequency in such a case. + */ + if (!is_idle_task(rq->curr) || irqtime) { + if (unlikely(cur_cycles < p->cpu_cycles)) + rq->cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles); + else + rq->cc.cycles = cur_cycles - p->cpu_cycles; + rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC; + + if (event == IRQ_UPDATE && is_idle_task(p)) + /* + * Time between mark_start of idle task and IRQ handler + * entry time is CPU cycle counter stall period. + * Upon IRQ handler entry sched_account_irqstart() + * replenishes idle task's cpu cycle counter so + * rq->cc.cycles now represents increased cycles during + * IRQ handler rather than time between idle entry and + * IRQ exit. Thus use irqtime as time delta. 
+ */ + rq->cc.time = irqtime; + else + rq->cc.time = wallclock - p->ravg.mark_start; + BUG_ON((s64)rq->cc.time < 0); + } + + p->cpu_cycles = cur_cycles; + + trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, + rq->cc.time, p); +} + +static int +account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event) +{ + /* + * No need to bother updating task demand for exiting tasks + * or the idle task. + */ + if (exiting_task(p) || is_idle_task(p)) + return 0; + + /* + * When a task is waking up it is completing a segment of non-busy + * time. Likewise, if wait time is not treated as busy time, then + * when a task begins to run or is migrated, it is not running and + * is completing a segment of non-busy time. + */ + if (event == TASK_WAKE || (!SCHED_ACCOUNT_WAIT_TIME && + (event == PICK_NEXT_TASK || event == TASK_MIGRATE))) + return 0; + + /* + * TASK_UPDATE can be called on sleeping task, when its moved between + * related groups + */ + if (event == TASK_UPDATE) { + if (rq->curr == p) + return 1; + + return p->on_rq ? SCHED_ACCOUNT_WAIT_TIME : 0; + } + + return 1; +} + +/* + * Called when new window is starting for a task, to record cpu usage over + * recently concluded window(s). Normally 'samples' should be 1. It can be > 1 + * when, say, a real-time task runs without preemption for several windows at a + * stretch. 
+ */ +static void update_history(struct rq *rq, struct task_struct *p, + u32 runtime, int samples, int event) +{ + u32 *hist = &p->ravg.sum_history[0]; + int ridx, widx; + u32 max = 0, avg, demand, pred_demand; + u64 sum = 0, wma = 0, ewa = 0; + + /* Ignore windows where task had no activity */ + if (!runtime || is_idle_task(p) || exiting_task(p) || !samples) + goto done; + + /* Push new 'runtime' value onto stack */ + widx = sched_ravg_hist_size - 1; + ridx = widx - samples; + for (; ridx >= 0; --widx, --ridx) { + hist[widx] = hist[ridx]; + sum += hist[widx]; + wma += hist[widx] * (sched_ravg_hist_size - widx); + ewa += hist[widx] << (sched_ravg_hist_size - widx - 1); + if (hist[widx] > max) + max = hist[widx]; + } + + for (widx = 0; widx < samples && widx < sched_ravg_hist_size; widx++) { + hist[widx] = runtime; + sum += hist[widx]; + wma += hist[widx] * (sched_ravg_hist_size - widx); + ewa += hist[widx] << (sched_ravg_hist_size - widx - 1); + if (hist[widx] > max) + max = hist[widx]; + } + + p->ravg.sum = 0; + + if (sched_window_stats_policy == WINDOW_STATS_RECENT) { + demand = runtime; + } else if (sched_window_stats_policy == WINDOW_STATS_MAX) { + demand = max; + } else { + avg = div64_u64(sum, sched_ravg_hist_size); + wma = div64_u64(wma, (sched_ravg_hist_size * (sched_ravg_hist_size + 1)) / 2); + ewa = div64_u64(ewa, (1 << sched_ravg_hist_size) - 1); + + if (sched_window_stats_policy == WINDOW_STATS_AVG) + demand = avg; + else if (sched_window_stats_policy == WINDOW_STATS_MAX_RECENT_WMA) + /* + * WMA stands for weighted moving average. It helps + * to smooth load curve and react faster while ramping + * down comparing with basic averaging. We do it only + * when load trend goes down. See below example (4 HS): + * + * WMA = (P0 * 4 + P1 * 3 + P2 * 2 + P3 * 1) / (4 + 3 + 2 + 1) + * + * This is done for power saving. Means when load disappears + * or becomes low, this algorithm caches real bottom load faster + * (because of weights) then taking AVG values. 
+ */ + demand = max((u32) wma, runtime); + else if (sched_window_stats_policy == WINDOW_STATS_WMA) + demand = (u32) wma; + else if (sched_window_stats_policy == WINDOW_STATS_MAX_RECENT_EWA) + /* + * EWA stands for exponential weighted average + */ + demand = max((u32) ewa, runtime); + else if (sched_window_stats_policy == WINDOW_STATS_EWA) + demand = (u32) ewa; + else + demand = max(avg, runtime); + } + pred_demand = predict_and_update_buckets(rq, p, runtime); + + /* + * A throttled deadline sched class task gets dequeued without + * changing p->on_rq. Since the dequeue decrements hmp stats + * avoid decrementing it here again. + */ + if (task_on_rq_queued(p) && (!task_has_dl_policy(p) || + !p->dl.dl_throttled)) + p->sched_class->fixup_hmp_sched_stats(rq, p, demand, + pred_demand); + + p->ravg.demand = demand; + p->ravg.pred_demand = pred_demand; + +done: + trace_sched_update_history(rq, p, runtime, samples, event); +} + +static u64 add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta) +{ + delta = scale_exec_time(delta, rq); + p->ravg.sum += delta; + if (unlikely(p->ravg.sum > sched_ravg_window)) + p->ravg.sum = sched_ravg_window; + + return delta; +} + +/* + * Account cpu demand of task and/or update task's cpu demand history + * + * ms = p->ravg.mark_start; + * wc = wallclock + * ws = rq->window_start + * + * Three possibilities: + * + * a) Task event is contained within one window. + * window_start < mark_start < wallclock + * + * ws ms wc + * | | | + * V V V + * |---------------| + * + * In this case, p->ravg.sum is updated *iff* event is appropriate + * (ex: event == PUT_PREV_TASK) + * + * b) Task event spans two windows. + * mark_start < window_start < wallclock + * + * ms ws wc + * | | | + * V V V + * -----|------------------- + * + * In this case, p->ravg.sum is updated with (ws - ms) *iff* event + * is appropriate, then a new window sample is recorded followed + * by p->ravg.sum being set to (wc - ws) *iff* event is appropriate. 
+ * + * c) Task event spans more than two windows. + * + * ms ws_tmp ws wc + * | | | | + * V V V V + * ---|-------|-------|-------|-------|------ + * | | + * |<------ nr_full_windows ------>| + * + * In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff* + * event is appropriate, window sample of p->ravg.sum is recorded, + * 'nr_full_window' samples of window_size is also recorded *iff* + * event is appropriate and finally p->ravg.sum is set to (wc - ws) + * *iff* event is appropriate. + * + * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time() + * depends on it! + */ +static u64 update_task_demand(struct task_struct *p, struct rq *rq, + int event, u64 wallclock) +{ + u64 mark_start = p->ravg.mark_start; + u64 delta, window_start = rq->window_start; + int new_window, nr_full_windows; + u32 window_size = sched_ravg_window; + u64 runtime; + + new_window = mark_start < window_start; + if (!account_busy_for_task_demand(rq, p, event)) { + if (new_window) + /* + * If the time accounted isn't being accounted as + * busy time, and a new window started, only the + * previous window need be closed out with the + * pre-existing demand. Multiple windows may have + * elapsed, but since empty windows are dropped, + * it is not necessary to account those. + */ + update_history(rq, p, p->ravg.sum, 1, event); + return 0; + } + + if (!new_window) { + /* + * The simple case - busy time contained within the existing + * window. + */ + return add_to_task_demand(rq, p, wallclock - mark_start); + } + + /* + * Busy time spans at least two windows. Temporarily rewind + * window_start to first window boundary after mark_start. 
+ */ + delta = window_start - mark_start; + nr_full_windows = div64_u64(delta, window_size); + window_start -= (u64)nr_full_windows * (u64)window_size; + + /* Process (window_start - mark_start) first */ + runtime = add_to_task_demand(rq, p, window_start - mark_start); + + /* Push new sample(s) into task's demand history */ + update_history(rq, p, p->ravg.sum, 1, event); + if (nr_full_windows) { + u64 scaled_window = scale_exec_time(window_size, rq); + + update_history(rq, p, scaled_window, nr_full_windows, event); + runtime += nr_full_windows * scaled_window; + } + + /* + * Roll window_start back to current to process any remainder + * in current window. + */ + window_start += (u64)nr_full_windows * (u64)window_size; + + /* Process (wallclock - window_start) next */ + mark_start = window_start; + runtime += add_to_task_demand(rq, p, wallclock - mark_start); + + return runtime; +} + +static inline void +update_task_burst(struct task_struct *p, struct rq *rq, int event, u64 runtime) +{ + /* + * update_task_demand() has checks for idle task and + * exit task. The runtime may include the wait time, + * so update the burst only for the cases where the + * task is running. 
+ */ + if (event == PUT_PREV_TASK || (event == TASK_UPDATE && + rq->curr == p)) + p->ravg.curr_burst += runtime; +} + +/* Reflect task activity on its demand and cpu's busy time statistics */ +void update_task_ravg(struct task_struct *p, struct rq *rq, int event, + u64 wallclock, u64 irqtime) +{ + u64 runtime; + + if (!rq->window_start || sched_disable_window_stats || + p->ravg.mark_start == wallclock) + return; + + lockdep_assert_held(&rq->lock); + + update_window_start(rq, wallclock); + + if (!p->ravg.mark_start) { + update_task_cpu_cycles(p, cpu_of(rq), wallclock); + goto done; + } + + update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime); + runtime = update_task_demand(p, rq, event, wallclock); + if (runtime) + update_task_burst(p, rq, event, runtime); + update_cpu_busy_time(p, rq, event, wallclock, irqtime); + update_task_pred_demand(rq, p, event); + + if (exiting_task(p)) + goto done; + + trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime, + rq->cc.cycles, rq->cc.time, + p->grp ? &rq->grp_time : NULL); + +done: + p->ravg.mark_start = wallclock; +} + +void sched_account_irqtime(int cpu, struct task_struct *curr, + u64 delta, u64 wallclock) +{ + struct rq *rq = cpu_rq(cpu); + unsigned long flags, nr_windows; + u64 cur_jiffies_ts; + + raw_spin_lock_irqsave(&rq->lock, flags); + + /* + * cputime (wallclock) uses sched_clock so use the same here for + * consistency. + */ + delta += sched_clock() - wallclock; + cur_jiffies_ts = get_jiffies_64(); + + if (is_idle_task(curr)) + update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(), + delta); + + nr_windows = cur_jiffies_ts - rq->irqload_ts; + + if (nr_windows) { + if (nr_windows < 10) { + /* Decay CPU's irqload by 3/4 for each window. 
*/ + rq->avg_irqload *= (3 * nr_windows); + rq->avg_irqload = div64_u64(rq->avg_irqload, + 4 * nr_windows); + } else { + rq->avg_irqload = 0; + } + rq->avg_irqload += rq->cur_irqload; + rq->cur_irqload = 0; + } + + rq->cur_irqload += delta; + rq->irqload_ts = cur_jiffies_ts; + raw_spin_unlock_irqrestore(&rq->lock, flags); +} + +void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock) +{ + struct rq *rq = cpu_rq(cpu); + + if (!rq->window_start || sched_disable_window_stats) + return; + + if (is_idle_task(curr)) { + /* We're here without rq->lock held, IRQ disabled */ + raw_spin_lock(&rq->lock); + update_task_cpu_cycles(curr, cpu, sched_ktime_clock()); + raw_spin_unlock(&rq->lock); + } +} + +void reset_task_stats(struct task_struct *p) +{ + u32 sum = 0; + u32 *curr_window_ptr = NULL; + u32 *prev_window_ptr = NULL; + + if (exiting_task(p)) { + sum = EXITING_TASK_MARKER; + } else { + curr_window_ptr = p->ravg.curr_window_cpu; + prev_window_ptr = p->ravg.prev_window_cpu; + memset(curr_window_ptr, 0, sizeof(u32) * nr_cpu_ids); + memset(prev_window_ptr, 0, sizeof(u32) * nr_cpu_ids); + } + + memset(&p->ravg, 0, sizeof(struct ravg)); + + p->ravg.curr_window_cpu = curr_window_ptr; + p->ravg.prev_window_cpu = prev_window_ptr; + + p->ravg.avg_burst = 2 * (u64)sysctl_sched_short_burst; + + /* Retain EXITING_TASK marker */ + p->ravg.sum_history[0] = sum; +} + +void mark_task_starting(struct task_struct *p) +{ + u64 wallclock; + struct rq *rq = task_rq(p); + + if (!rq->window_start || sched_disable_window_stats) { + reset_task_stats(p); + return; + } + + wallclock = sched_ktime_clock(); + p->ravg.mark_start = p->last_wake_ts = wallclock; + p->last_cpu_selected_ts = wallclock; + p->last_switch_out_ts = 0; + update_task_cpu_cycles(p, cpu_of(rq), wallclock); +} + +void set_window_start(struct rq *rq) +{ + static int sync_cpu_available; + + if (rq->window_start) + return; + + if (!sync_cpu_available) { + rq->window_start = sched_ktime_clock(); + 
sync_cpu_available = 1; + } else { + struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask)); + + raw_spin_unlock(&rq->lock); + double_rq_lock(rq, sync_rq); + rq->window_start = sync_rq->window_start; + rq->curr_runnable_sum = rq->prev_runnable_sum = 0; + rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0; + raw_spin_unlock(&sync_rq->lock); + } + + rq->curr->ravg.mark_start = rq->window_start; +} + +static void reset_all_task_stats(void) +{ + struct task_struct *g, *p; + + do_each_thread(g, p) { + reset_task_stats(p); + } while_each_thread(g, p); +} + +enum reset_reason_code { + WINDOW_CHANGE, + POLICY_CHANGE, + HIST_SIZE_CHANGE, + FREQ_AGGREGATE_CHANGE, +}; + +const char *sched_window_reset_reasons[] = { + "WINDOW_CHANGE", + "POLICY_CHANGE", + "HIST_SIZE_CHANGE", + "FREQ_AGGREGATE_CHANGE", +}; + +/* Called with IRQs enabled */ +void reset_all_window_stats(u64 window_start, unsigned int window_size) +{ + int cpu, i; + unsigned long flags; + u64 start_ts = sched_ktime_clock(); + int reason = WINDOW_CHANGE; + unsigned int old = 0, new = 0; + + local_irq_save(flags); + + read_lock(&tasklist_lock); + + read_lock(&related_thread_group_lock); + + /* Taking all runqueue locks prevents race with sched_exit(). 
*/ + for_each_possible_cpu(cpu) + raw_spin_lock(&cpu_rq(cpu)->lock); + + sched_disable_window_stats = 1; + + reset_all_task_stats(); + + read_unlock(&tasklist_lock); + + if (window_size) { + sched_ravg_window = window_size * TICK_NSEC; + set_hmp_defaults(); + WRITE_ONCE(sched_load_granule, sched_ravg_window / NUM_LOAD_INDICES); + } + + sched_disable_window_stats = 0; + + for_each_possible_cpu(cpu) { + struct rq *rq = cpu_rq(cpu); + + if (window_start) + rq->window_start = window_start; + rq->curr_runnable_sum = rq->prev_runnable_sum = 0; + rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0; + memset(&rq->grp_time, 0, sizeof(struct group_cpu_time)); + + /* + * just commit zero, since grp_time/prev are 0 + */ + commit_prev_group_run_sum(rq); + + for (i = 0; i < NUM_TRACKED_WINDOWS; i++) { + memset(&rq->load_subs[i], 0, + sizeof(struct load_subtractions)); + clear_top_tasks_table(rq->top_tasks[i]); + clear_top_tasks_bitmap(rq->top_tasks_bitmap[i]); + } + + rq->curr_table = 0; + rq->curr_top = 0; + rq->prev_top = 0; + reset_cpu_hmp_stats(cpu, 1); + } + + if (sched_window_stats_policy != sysctl_sched_window_stats_policy) { + reason = POLICY_CHANGE; + old = sched_window_stats_policy; + new = sysctl_sched_window_stats_policy; + sched_window_stats_policy = sysctl_sched_window_stats_policy; + } else if (sched_ravg_hist_size != sysctl_sched_ravg_hist_size) { + reason = HIST_SIZE_CHANGE; + old = sched_ravg_hist_size; + new = sysctl_sched_ravg_hist_size; + sched_ravg_hist_size = sysctl_sched_ravg_hist_size; + } else if (sched_freq_aggregate != + sysctl_sched_freq_aggregate) { + reason = FREQ_AGGREGATE_CHANGE; + old = sched_freq_aggregate; + new = sysctl_sched_freq_aggregate; + sched_freq_aggregate = sysctl_sched_freq_aggregate; + } + + for_each_possible_cpu(cpu) + raw_spin_unlock(&cpu_rq(cpu)->lock); + + read_unlock(&related_thread_group_lock); + + local_irq_restore(flags); + + trace_sched_reset_all_window_stats(window_start, window_size, + sched_ktime_clock() - start_ts, 
reason, old, new); +} + +/* + * In this function we match the accumulated subtractions with the current + * and previous windows we are operating with. Ignore any entries where + * the window start in the load_subtraction struct does not match either + * the curent or the previous window. This could happen whenever CPUs + * become idle or busy with interrupts disabled for an extended period. + */ +static inline void account_load_subtractions(struct rq *rq) +{ + u64 ws = rq->window_start; + u64 prev_ws = ws - sched_ravg_window; + struct load_subtractions *ls = rq->load_subs; + int i; + + for (i = 0; i < NUM_TRACKED_WINDOWS; i++) { + if (ls[i].window_start == ws) { + rq->curr_runnable_sum -= ls[i].subs; + rq->nt_curr_runnable_sum -= ls[i].new_subs; + } else if (ls[i].window_start == prev_ws) { + rq->prev_runnable_sum -= ls[i].subs; + rq->nt_prev_runnable_sum -= ls[i].new_subs; + } + + ls[i].subs = 0; + ls[i].new_subs = 0; + } + + commit_prev_group_run_sum(rq); + + BUG_ON((s64)rq->prev_runnable_sum < 0); + BUG_ON((s64)rq->curr_runnable_sum < 0); + BUG_ON((s64)rq->nt_prev_runnable_sum < 0); + BUG_ON((s64)rq->nt_curr_runnable_sum < 0); +} + +static inline u64 freq_policy_load(struct rq *rq, u64 load) +{ + unsigned int reporting_policy = sysctl_sched_freq_reporting_policy; + + switch (reporting_policy) { + case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK: + load = max_t(u64, load, top_task_load(rq)); + break; + case FREQ_REPORT_TOP_TASK: + load = top_task_load(rq); + break; + case FREQ_REPORT_CPU_LOAD: + break; + default: + break; + } + + return load; +} + +void sched_get_cpus_busy(struct sched_load *busy, + const struct cpumask *query_cpus) +{ + unsigned long flags; + struct rq *rq; + const int cpus = cpumask_weight(query_cpus); + u64 load[cpus], group_load[cpus]; + u64 nload[cpus], ngload[cpus]; + u64 pload[cpus]; + unsigned int max_freq[cpus]; + int notifier_sent = 0; + int early_detection[cpus]; + int cpu, i = 0; + unsigned int window_size; + u64 max_prev_sum = 0; + int 
max_busy_cpu = cpumask_first(query_cpus); + u64 total_group_load = 0, total_ngload = 0; + bool aggregate_load = false; + struct sched_cluster *cluster = cpu_cluster(cpumask_first(query_cpus)); + + if (unlikely(cpus == 0)) + return; + + local_irq_save(flags); + + /* + * This function could be called in timer context, and the + * current task may have been executing for a long time. Ensure + * that the window stats are current by doing an update. + */ + + for_each_cpu(cpu, query_cpus) + raw_spin_lock(&cpu_rq(cpu)->lock); + + window_size = sched_ravg_window; + + /* + * We don't really need the cluster lock for this entire for loop + * block. However, there is no advantage in optimizing this as rq + * locks are held regardless and would prevent migration anyways + */ + raw_spin_lock(&cluster->load_lock); + + for_each_cpu(cpu, query_cpus) { + rq = cpu_rq(cpu); + + update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_ktime_clock(), + 0); + + /* + * Ensure that we don't report load for 'cpu' again via the + * cpufreq_update_util path in the window that started at + * rq->window_start + */ + rq->load_reported_window = rq->window_start; + + account_load_subtractions(rq); + load[i] = rq->prev_runnable_sum; + nload[i] = rq->nt_prev_runnable_sum; + pload[i] = rq->hmp_stats.pred_demands_sum; + rq->old_estimated_time = pload[i]; + + if (load[i] > max_prev_sum) { + max_prev_sum = load[i]; + max_busy_cpu = cpu; + } + + early_detection[i] = (rq->ed_task != NULL); + max_freq[i] = cpu_max_freq(cpu); + i++; + } + + raw_spin_unlock(&cluster->load_lock); + + group_load_in_freq_domain( + &cpu_rq(max_busy_cpu)->freq_domain_cpumask, + &total_group_load, &total_ngload); + aggregate_load = !!(total_group_load > sched_freq_aggregate_threshold); + + i = 0; + for_each_cpu(cpu, query_cpus) { + group_load[i] = 0; + ngload[i] = 0; + + if (early_detection[i]) + goto skip_early; + + rq = cpu_rq(cpu); + if (aggregate_load) { + if (cpu == max_busy_cpu) { + group_load[i] = total_group_load; + ngload[i] = 
total_ngload; + } + } else { + group_load[i] = rq->grp_time.prev_runnable_sum; + ngload[i] = rq->grp_time.nt_prev_runnable_sum; + } + + load[i] += group_load[i]; + nload[i] += ngload[i]; + + load[i] = freq_policy_load(rq, load[i]); + rq->old_busy_time = load[i]; + + /* + * Scale load in reference to cluster max_possible_freq. + * + * Note that scale_load_to_cpu() scales load in reference to + * the cluster max_freq. + */ + load[i] = scale_load_to_cpu(load[i], cpu); + nload[i] = scale_load_to_cpu(nload[i], cpu); + pload[i] = scale_load_to_cpu(pload[i], cpu); +skip_early: + i++; + } + + for_each_cpu(cpu, query_cpus) { + rq = cpu_rq(cpu); + + /* + * sched_get_cpus_busy() is called for all CPUs in a + * frequency domain. So the notifier_sent flag per + * cluster works even when a frequency domain spans + * more than 1 cluster. + */ + if (atomic_cmpxchg(&rq->cluster->notifier_sent, 1, 0)) + notifier_sent = 1; + + raw_spin_unlock(&(cpu_rq(cpu))->lock); + } + + local_irq_restore(flags); + + i = 0; + for_each_cpu(cpu, query_cpus) { + rq = cpu_rq(cpu); + + if (early_detection[i]) { + busy[i].prev_load = div64_u64(sched_ravg_window, + NSEC_PER_USEC); + busy[i].new_task_load = 0; + busy[i].predicted_load = 0; + goto exit_early; + } + + load[i] = scale_load_to_freq(load[i], max_freq[i], + cpu_max_possible_freq(cpu)); + nload[i] = scale_load_to_freq(nload[i], max_freq[i], + cpu_max_possible_freq(cpu)); + + pload[i] = scale_load_to_freq(pload[i], max_freq[i], + rq->cluster->max_possible_freq); + + busy[i].prev_load = div64_u64(load[i], NSEC_PER_USEC); + busy[i].new_task_load = div64_u64(nload[i], NSEC_PER_USEC); + busy[i].predicted_load = div64_u64(pload[i], NSEC_PER_USEC); + +exit_early: + trace_sched_get_busy(cpu, busy[i].prev_load, + busy[i].new_task_load, + busy[i].predicted_load, + early_detection[i], + aggregate_load && + cpu == max_busy_cpu); + i++; + } +} + +void sched_set_io_is_busy(int val) +{ + sched_io_is_busy = val; +} + +int sched_set_window(u64 window_start, 
unsigned int window_size) +{ + u64 now, cur_jiffies, jiffy_ktime_ns; + s64 ws; + unsigned long flags; + + if (window_size * TICK_NSEC < MIN_SCHED_RAVG_WINDOW) + return -EINVAL; + + mutex_lock(&policy_mutex); + + /* + * Get a consistent view of ktime, jiffies, and the time + * since the last jiffy (based on last_jiffies_update). + */ + local_irq_save(flags); + cur_jiffies = jiffy_to_ktime_ns(&now, &jiffy_ktime_ns); + local_irq_restore(flags); + + /* translate window_start from jiffies to nanoseconds */ + ws = (window_start - cur_jiffies); /* jiffy difference */ + ws *= TICK_NSEC; + ws += jiffy_ktime_ns; + + /* + * Roll back calculated window start so that it is in + * the past (window stats must have a current window). + */ + while (ws > now) + ws -= (window_size * TICK_NSEC); + + BUG_ON(sched_ktime_clock() < ws); + + reset_all_window_stats(ws, window_size); + + sched_update_freq_max_load(cpu_possible_mask); + + mutex_unlock(&policy_mutex); + + return 0; +} + +static inline void create_subtraction_entry(struct rq *rq, u64 ws, int index) +{ + rq->load_subs[index].window_start = ws; + rq->load_subs[index].subs = 0; + rq->load_subs[index].new_subs = 0; +} + +static bool get_subtraction_index(struct rq *rq, u64 ws) +{ + int i; + u64 oldest = ULLONG_MAX; + int oldest_index = 0; + + for (i = 0; i < NUM_TRACKED_WINDOWS; i++) { + u64 entry_ws = rq->load_subs[i].window_start; + + if (ws == entry_ws) + return i; + + if (entry_ws < oldest) { + oldest = entry_ws; + oldest_index = i; + } + } + + create_subtraction_entry(rq, ws, oldest_index); + return oldest_index; +} + +static void update_rq_load_subtractions(int index, struct rq *rq, + u32 sub_load, bool new_task) +{ + rq->load_subs[index].subs += sub_load; + if (new_task) + rq->load_subs[index].new_subs += sub_load; +} + +static void update_cluster_load_subtractions(struct task_struct *p, + int cpu, u64 ws, bool new_task) +{ + struct sched_cluster *cluster = cpu_cluster(cpu); + struct cpumask cluster_cpus = cluster->cpus; + 
u64 prev_ws = ws - sched_ravg_window; + int i; + + cpumask_clear_cpu(cpu, &cluster_cpus); + raw_spin_lock(&cluster->load_lock); + + for_each_cpu(i, &cluster_cpus) { + struct rq *rq = cpu_rq(i); + int index; + + if (p->ravg.curr_window_cpu[i]) { + index = get_subtraction_index(rq, ws); + update_rq_load_subtractions(index, rq, + p->ravg.curr_window_cpu[i], new_task); + p->ravg.curr_window_cpu[i] = 0; + } + + if (p->ravg.prev_window_cpu[i]) { + index = get_subtraction_index(rq, prev_ws); + update_rq_load_subtractions(index, rq, + p->ravg.prev_window_cpu[i], new_task); + p->ravg.prev_window_cpu[i] = 0; + } + } + + raw_spin_unlock(&cluster->load_lock); +} + +static inline void inter_cluster_migration_fixup + (struct task_struct *p, int new_cpu, int task_cpu, bool new_task) +{ + struct rq *dest_rq = cpu_rq(new_cpu); + struct rq *src_rq = cpu_rq(task_cpu); + + if (same_freq_domain(new_cpu, task_cpu)) + return; + + p->ravg.curr_window_cpu[new_cpu] = p->ravg.curr_window; + p->ravg.prev_window_cpu[new_cpu] = p->ravg.prev_window; + + dest_rq->curr_runnable_sum += p->ravg.curr_window; + dest_rq->prev_runnable_sum += p->ravg.prev_window; + + src_rq->curr_runnable_sum -= p->ravg.curr_window_cpu[task_cpu]; + src_rq->prev_runnable_sum -= p->ravg.prev_window_cpu[task_cpu]; + + if (new_task) { + dest_rq->nt_curr_runnable_sum += p->ravg.curr_window; + dest_rq->nt_prev_runnable_sum += p->ravg.prev_window; + + src_rq->nt_curr_runnable_sum -= + p->ravg.curr_window_cpu[task_cpu]; + src_rq->nt_prev_runnable_sum -= + p->ravg.prev_window_cpu[task_cpu]; + } + + p->ravg.curr_window_cpu[task_cpu] = 0; + p->ravg.prev_window_cpu[task_cpu] = 0; + + update_cluster_load_subtractions(p, task_cpu, + src_rq->window_start, new_task); + + BUG_ON((s64)src_rq->prev_runnable_sum < 0); + BUG_ON((s64)src_rq->curr_runnable_sum < 0); + BUG_ON((s64)src_rq->nt_prev_runnable_sum < 0); + BUG_ON((s64)src_rq->nt_curr_runnable_sum < 0); +} + +static int get_top_index(unsigned long *bitmap, unsigned long old_top) +{ + 
int index = find_next_bit(bitmap, NUM_LOAD_INDICES, old_top); + + if (index == NUM_LOAD_INDICES) + return 0; + + return NUM_LOAD_INDICES - 1 - index; +} + +static void +migrate_top_tasks(struct task_struct *p, struct rq *src_rq, struct rq *dst_rq) +{ + int index; + int top_index; + u32 curr_window = p->ravg.curr_window; + u32 prev_window = p->ravg.prev_window; + u8 src = src_rq->curr_table; + u8 dst = dst_rq->curr_table; + u8 *src_table; + u8 *dst_table; + + if (curr_window) { + src_table = src_rq->top_tasks[src]; + dst_table = dst_rq->top_tasks[dst]; + index = load_to_index(curr_window); + src_table[index] -= 1; + dst_table[index] += 1; + + if (!src_table[index]) + __clear_bit(NUM_LOAD_INDICES - index - 1, + src_rq->top_tasks_bitmap[src]); + + if (dst_table[index] == 1) + __set_bit(NUM_LOAD_INDICES - index - 1, + dst_rq->top_tasks_bitmap[dst]); + + if (index > dst_rq->curr_top) + dst_rq->curr_top = index; + + top_index = src_rq->curr_top; + if (index == top_index && !src_table[index]) + src_rq->curr_top = get_top_index( + src_rq->top_tasks_bitmap[src], top_index); + } + + if (prev_window) { + src = 1 - src; + dst = 1 - dst; + src_table = src_rq->top_tasks[src]; + dst_table = dst_rq->top_tasks[dst]; + index = load_to_index(prev_window); + src_table[index] -= 1; + dst_table[index] += 1; + + if (!src_table[index]) + __clear_bit(NUM_LOAD_INDICES - index - 1, + src_rq->top_tasks_bitmap[src]); + + if (dst_table[index] == 1) + __set_bit(NUM_LOAD_INDICES - index - 1, + dst_rq->top_tasks_bitmap[dst]); + + if (index > dst_rq->prev_top) + dst_rq->prev_top = index; + + top_index = src_rq->prev_top; + if (index == top_index && !src_table[index]) + src_rq->prev_top = get_top_index( + src_rq->top_tasks_bitmap[src], top_index); + } +} + +void fixup_busy_time(struct task_struct *p, int new_cpu) +{ + struct rq *src_rq = task_rq(p); + struct rq *dest_rq = cpu_rq(new_cpu); + u64 wallclock; + u64 *src_curr_runnable_sum, *dst_curr_runnable_sum; + u64 *src_prev_runnable_sum, 
*dst_prev_runnable_sum; + u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum; + u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum; + bool new_task; + struct related_thread_group *grp; + + if (!p->on_rq && p->state != TASK_WAKING) + return; + + if (exiting_task(p)) { + clear_ed_task(p, src_rq); + return; + } + + if (p->state == TASK_WAKING) + double_rq_lock(src_rq, dest_rq); + + if (sched_disable_window_stats) + goto done; + + wallclock = sched_ktime_clock(); + + update_task_ravg(task_rq(p)->curr, task_rq(p), + TASK_UPDATE, + wallclock, 0); + update_task_ravg(dest_rq->curr, dest_rq, + TASK_UPDATE, wallclock, 0); + + update_task_ravg(p, task_rq(p), TASK_MIGRATE, + wallclock, 0); + + update_task_cpu_cycles(p, new_cpu, wallclock); + + new_task = is_new_task(p); + /* Protected by rq_lock */ + grp = p->grp; + + /* + * For frequency aggregation, we continue to do migration fixups + * even for intra cluster migrations. This is because, the aggregated + * load has to reported on a single CPU regardless. 
+ */ + if (grp && sched_freq_aggregate) { + struct group_cpu_time *cpu_time; + + cpu_time = &src_rq->grp_time; + src_curr_runnable_sum = &cpu_time->curr_runnable_sum; + src_prev_runnable_sum = &cpu_time->prev_runnable_sum; + src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum; + src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum; + + cpu_time = &dest_rq->grp_time; + dst_curr_runnable_sum = &cpu_time->curr_runnable_sum; + dst_prev_runnable_sum = &cpu_time->prev_runnable_sum; + dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum; + dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum; + + if (p->ravg.curr_window) { + *src_curr_runnable_sum -= p->ravg.curr_window; + *dst_curr_runnable_sum += p->ravg.curr_window; + if (new_task) { + *src_nt_curr_runnable_sum -= + p->ravg.curr_window; + *dst_nt_curr_runnable_sum += + p->ravg.curr_window; + } + } + + if (p->ravg.prev_window) { + *src_prev_runnable_sum -= p->ravg.prev_window; + *dst_prev_runnable_sum += p->ravg.prev_window; + if (new_task) { + *src_nt_prev_runnable_sum -= + p->ravg.prev_window; + *dst_nt_prev_runnable_sum += + p->ravg.prev_window; + } + } + } else { + inter_cluster_migration_fixup(p, new_cpu, + task_cpu(p), new_task); + } + + migrate_top_tasks(p, src_rq, dest_rq); + + if (!same_freq_domain(new_cpu, task_cpu(p))) { + cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG); + cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG); + } + + if (p == src_rq->ed_task) { + src_rq->ed_task = NULL; + if (!dest_rq->ed_task) + dest_rq->ed_task = p; + } + + commit_prev_group_run_sum(src_rq); + commit_prev_group_run_sum(dest_rq); + +done: + if (p->state == TASK_WAKING) + double_rq_unlock(src_rq, dest_rq); +} + +#define sched_up_down_migrate_auto_update 1 +static void check_for_up_down_migrate_update(const struct cpumask *cpus) +{ + int i = cpumask_first(cpus); + + if (!sched_up_down_migrate_auto_update) + return; + + if (cpu_max_possible_capacity(i) == max_possible_capacity) + 
return; + + if (cpu_max_possible_freq(i) == cpu_max_freq(i)) + up_down_migrate_scale_factor = 1024; + else + up_down_migrate_scale_factor = (1024 * + cpu_max_possible_freq(i)) / cpu_max_freq(i); + + update_up_down_migrate(); +} + +/* Return cluster which can offer required capacity for group */ +static struct sched_cluster *best_cluster(struct related_thread_group *grp, + u64 total_demand, bool group_boost) +{ + struct sched_cluster *cluster = NULL; + + for_each_sched_cluster(cluster) { + if (group_will_fit(cluster, grp, total_demand, group_boost)) + return cluster; + } + + return sched_cluster[0]; +} + +static void _set_preferred_cluster(struct related_thread_group *grp) +{ + struct task_struct *p; + u64 combined_demand = 0; + bool boost_on_big = sched_boost_policy() == SCHED_BOOST_ON_BIG; + bool group_boost = false; + u64 wallclock; + + if (list_empty(&grp->tasks)) + return; + + wallclock = sched_ktime_clock(); + + /* + * wakeup of two or more related tasks could race with each other and + * could result in multiple calls to _set_preferred_cluster being issued + * at same time. 
Avoid overhead in such cases of rechecking preferred + * cluster + */ + if (wallclock - grp->last_update < sched_ravg_window / 10) + return; + + list_for_each_entry(p, &grp->tasks, grp_list) { + if (boost_on_big && task_sched_boost(p)) { + group_boost = true; + break; + } + + if (p->ravg.mark_start < wallclock - + (sched_ravg_window * sched_ravg_hist_size)) + continue; + + combined_demand += p->ravg.demand; + + } + + grp->preferred_cluster = best_cluster(grp, + combined_demand, group_boost); + grp->last_update = sched_ktime_clock(); + trace_sched_set_preferred_cluster(grp, combined_demand); +} + +void set_preferred_cluster(struct related_thread_group *grp) +{ + raw_spin_lock(&grp->lock); + _set_preferred_cluster(grp); + raw_spin_unlock(&grp->lock); +} + +#define ADD_TASK 0 +#define REM_TASK 1 + +#define DEFAULT_CGROUP_COLOC_ID 1 + +/* + * Task's cpu usage is accounted in: + * rq->curr/prev_runnable_sum, when its ->grp is NULL + * grp->cpu_time[cpu]->curr/prev_runnable_sum, when its ->grp is !NULL + * + * Transfer task's cpu usage between those counters when transitioning between + * groups + */ +static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp, + struct task_struct *p, int event) +{ + u64 wallclock; + struct group_cpu_time *cpu_time; + u64 *src_curr_runnable_sum, *dst_curr_runnable_sum; + u64 *src_prev_runnable_sum, *dst_prev_runnable_sum; + u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum; + u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum; + int migrate_type; + int cpu = cpu_of(rq); + bool new_task; + int i; + + if (!sched_freq_aggregate) + return; + + wallclock = sched_ktime_clock(); + + update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); + update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0); + new_task = is_new_task(p); + + cpu_time = &rq->grp_time; + if (event == ADD_TASK) { + migrate_type = RQ_TO_GROUP; + + src_curr_runnable_sum = &rq->curr_runnable_sum; + dst_curr_runnable_sum = 
&cpu_time->curr_runnable_sum; + src_prev_runnable_sum = &rq->prev_runnable_sum; + dst_prev_runnable_sum = &cpu_time->prev_runnable_sum; + + src_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum; + dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum; + src_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum; + dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum; + + *src_curr_runnable_sum -= p->ravg.curr_window_cpu[cpu]; + *src_prev_runnable_sum -= p->ravg.prev_window_cpu[cpu]; + if (new_task) { + *src_nt_curr_runnable_sum -= + p->ravg.curr_window_cpu[cpu]; + *src_nt_prev_runnable_sum -= + p->ravg.prev_window_cpu[cpu]; + } + + update_cluster_load_subtractions(p, cpu, + rq->window_start, new_task); + + } else { + migrate_type = GROUP_TO_RQ; + + src_curr_runnable_sum = &cpu_time->curr_runnable_sum; + dst_curr_runnable_sum = &rq->curr_runnable_sum; + src_prev_runnable_sum = &cpu_time->prev_runnable_sum; + dst_prev_runnable_sum = &rq->prev_runnable_sum; + + src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum; + dst_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum; + src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum; + dst_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum; + + *src_curr_runnable_sum -= p->ravg.curr_window; + *src_prev_runnable_sum -= p->ravg.prev_window; + if (new_task) { + *src_nt_curr_runnable_sum -= p->ravg.curr_window; + *src_nt_prev_runnable_sum -= p->ravg.prev_window; + } + + /* + * Need to reset curr/prev windows for all CPUs, not just the + * ones in the same cluster. Since inter cluster migrations + * did not result in the appropriate book keeping, the values + * per CPU would be inaccurate. 
+ */ + for_each_possible_cpu(i) { + p->ravg.curr_window_cpu[i] = 0; + p->ravg.prev_window_cpu[i] = 0; + } + } + + *dst_curr_runnable_sum += p->ravg.curr_window; + *dst_prev_runnable_sum += p->ravg.prev_window; + if (new_task) { + *dst_nt_curr_runnable_sum += p->ravg.curr_window; + *dst_nt_prev_runnable_sum += p->ravg.prev_window; + } + + /* + * When a task enter or exits a group, it's curr and prev windows are + * moved to a single CPU. This behavior might be sub-optimal in the + * exit case, however, it saves us the overhead of handling inter + * cluster migration fixups while the task is part of a related group. + */ + p->ravg.curr_window_cpu[cpu] = p->ravg.curr_window; + p->ravg.prev_window_cpu[cpu] = p->ravg.prev_window; + + commit_prev_group_run_sum(rq); + + trace_sched_migration_update_sum(p, migrate_type, rq); + + BUG_ON((s64)*src_curr_runnable_sum < 0); + BUG_ON((s64)*src_prev_runnable_sum < 0); + BUG_ON((s64)*src_nt_curr_runnable_sum < 0); + BUG_ON((s64)*src_nt_prev_runnable_sum < 0); +} + +static inline struct related_thread_group* +lookup_related_thread_group(unsigned int group_id) +{ + return related_thread_groups[group_id]; +} + +int alloc_related_thread_groups(void) +{ + int i, ret; + struct related_thread_group *grp; + + /* groupd_id = 0 is invalid as it's special id to remove group. 
*/ + for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) { + grp = kzalloc(sizeof(*grp), GFP_NOWAIT); + if (!grp) { + ret = -ENOMEM; + goto err; + } + + grp->id = i; + INIT_LIST_HEAD(&grp->tasks); + INIT_LIST_HEAD(&grp->list); + raw_spin_lock_init(&grp->lock); + + related_thread_groups[i] = grp; + } + + return 0; + +err: + for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) { + grp = lookup_related_thread_group(i); + if (grp) { + kfree(grp); + related_thread_groups[i] = NULL; + } else { + break; + } + } + + return ret; +} + +static void remove_task_from_group(struct task_struct *p) +{ + struct related_thread_group *grp = p->grp; + struct rq *rq; + int empty_group = 1; + + raw_spin_lock(&grp->lock); + + rq = __task_rq_lock(p); + transfer_busy_time(rq, p->grp, p, REM_TASK); + list_del_init(&p->grp_list); + rcu_assign_pointer(p->grp, NULL); + __task_rq_unlock(rq); + + if (!list_empty(&grp->tasks)) { + empty_group = 0; + _set_preferred_cluster(grp); + } + + raw_spin_unlock(&grp->lock); + + /* Reserved groups cannot be destroyed */ + if (empty_group && grp->id != DEFAULT_CGROUP_COLOC_ID) + /* + * We test whether grp->list is attached with list_empty() + * hence re-init the list after deletion. + */ + list_del_init(&grp->list); +} + +static int +add_task_to_group(struct task_struct *p, struct related_thread_group *grp) +{ + struct rq *rq; + + raw_spin_lock(&grp->lock); + + /* + * Change p->grp under rq->lock. 
Will prevent races with read-side + * reference of p->grp in various hot-paths + */ + rq = __task_rq_lock(p); + transfer_busy_time(rq, grp, p, ADD_TASK); + list_add(&p->grp_list, &grp->tasks); + rcu_assign_pointer(p->grp, grp); + __task_rq_unlock(rq); + + _set_preferred_cluster(grp); + + raw_spin_unlock(&grp->lock); + + return 0; +} + +void add_new_task_to_grp(struct task_struct *new) +{ + unsigned long flags; + struct related_thread_group *grp; + struct task_struct *leader = new->group_leader; + unsigned int leader_grp_id = sched_get_group_id(leader); + + if (!sysctl_sched_enable_thread_grouping && + leader_grp_id != DEFAULT_CGROUP_COLOC_ID) + return; + + if (thread_group_leader(new)) + return; + + if (leader_grp_id == DEFAULT_CGROUP_COLOC_ID) { + if (!same_schedtune(new, leader)) + return; + } + + write_lock_irqsave(&related_thread_group_lock, flags); + + rcu_read_lock(); + grp = task_related_thread_group(leader); + rcu_read_unlock(); + + /* + * It's possible that someone already added the new task to the + * group. A leader's thread group is updated prior to calling + * this function. It's also possible that the leader has exited + * the group. In either case, there is nothing else to do. 
+ */ + if (!grp || new->grp) { + write_unlock_irqrestore(&related_thread_group_lock, flags); + return; + } + + raw_spin_lock(&grp->lock); + + rcu_assign_pointer(new->grp, grp); + list_add(&new->grp_list, &grp->tasks); + + raw_spin_unlock(&grp->lock); + write_unlock_irqrestore(&related_thread_group_lock, flags); +} + +static int __sched_set_group_id(struct task_struct *p, unsigned int group_id) +{ + int rc = 0; + unsigned long flags; + struct related_thread_group *grp = NULL; + + if (group_id >= MAX_NUM_CGROUP_COLOC_ID) + return -EINVAL; + + raw_spin_lock_irqsave(&p->pi_lock, flags); + write_lock(&related_thread_group_lock); + + /* Switching from one group to another directly is not permitted */ + if ((current != p && p->flags & PF_EXITING) || + (!p->grp && !group_id) || + (p->grp && group_id)) + goto done; + + if (!group_id) { + remove_task_from_group(p); + goto done; + } + + grp = lookup_related_thread_group(group_id); + if (list_empty(&grp->list)) + list_add(&grp->list, &active_related_thread_groups); + + rc = add_task_to_group(p, grp); +done: + write_unlock(&related_thread_group_lock); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + + return rc; +} + +int sched_set_group_id(struct task_struct *p, unsigned int group_id) +{ + /* DEFAULT_CGROUP_COLOC_ID is a reserved id */ + if (group_id == DEFAULT_CGROUP_COLOC_ID) + return -EINVAL; + + return __sched_set_group_id(p, group_id); +} + +unsigned int sched_get_group_id(struct task_struct *p) +{ + unsigned int group_id; + struct related_thread_group *grp; + + rcu_read_lock(); + grp = task_related_thread_group(p); + group_id = grp ? grp->id : 0; + rcu_read_unlock(); + + return group_id; +} + +#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE) +/* + * We create a default colocation group at boot. There is no need to + * synchronize tasks between cgroups at creation time because the + * correct cgroup hierarchy is not available at boot. 
Therefore cgroup + * colocation is turned off by default even though the colocation group + * itself has been allocated. Furthermore this colocation group cannot + * be destroyted once it has been created. All of this has been as part + * of runtime optimizations. + * + * The job of synchronizing tasks to the colocation group is done when + * the colocation flag in the cgroup is turned on. + */ +static int __init create_default_coloc_group(void) +{ + struct related_thread_group *grp = NULL; + unsigned long flags; + + grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID); + write_lock_irqsave(&related_thread_group_lock, flags); + list_add(&grp->list, &active_related_thread_groups); + write_unlock_irqrestore(&related_thread_group_lock, flags); + + update_freq_aggregate_threshold(MAX_FREQ_AGGR_THRESH); + return 0; +} +late_initcall(create_default_coloc_group); + +int sync_cgroup_colocation(struct task_struct *p, bool insert) +{ + unsigned int grp_id = insert ? DEFAULT_CGROUP_COLOC_ID : 0; + + return __sched_set_group_id(p, grp_id); +} +#endif + +static void update_cpu_cluster_capacity(const cpumask_t *cpus) +{ + int i; + struct sched_cluster *cluster; + struct cpumask cpumask; + + cpumask_copy(&cpumask, cpus); + pre_big_task_count_change(cpu_possible_mask); + + for_each_cpu(i, &cpumask) { + cluster = cpu_rq(i)->cluster; + cpumask_andnot(&cpumask, &cpumask, &cluster->cpus); + + cluster->capacity = compute_capacity(cluster); + cluster->load_scale_factor = compute_load_scale_factor(cluster); + + /* 'cpus' can contain cpumask more than one cluster */ + check_for_up_down_migrate_update(&cluster->cpus); + } + + __update_min_max_capacity(); + + post_big_task_count_change(cpu_possible_mask); +} + +static DEFINE_SPINLOCK(cpu_freq_min_max_lock); +void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax) +{ + struct cpumask cpumask; + struct sched_cluster *cluster; + int i, update_capacity = 0; + unsigned long flags; + + 
spin_lock_irqsave(&cpu_freq_min_max_lock, flags); + cpumask_copy(&cpumask, cpus); + for_each_cpu(i, &cpumask) { + cluster = cpu_rq(i)->cluster; + cpumask_andnot(&cpumask, &cpumask, &cluster->cpus); + + update_capacity += (cluster->max_mitigated_freq != fmax); + cluster->max_mitigated_freq = fmax; + } + spin_unlock_irqrestore(&cpu_freq_min_max_lock, flags); + + if (update_capacity) + update_cpu_cluster_capacity(cpus); +} + +static int cpufreq_notifier_policy(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_policy *policy = (struct cpufreq_policy *)data; + struct sched_cluster *cluster = NULL; + struct cpumask policy_cluster = *policy->related_cpus; + unsigned int orig_max_freq = 0; + int i, j, update_capacity = 0; + + if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY && + val != CPUFREQ_CREATE_POLICY) + return 0; + + if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) { + update_min_max_capacity(); + return 0; + } + + max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq); + if (min_max_freq == 1) + min_max_freq = UINT_MAX; + min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq); + BUG_ON(!min_max_freq); + BUG_ON(!policy->max); + + for_each_cpu(i, &policy_cluster) { + cluster = cpu_rq(i)->cluster; + cpumask_andnot(&policy_cluster, &policy_cluster, + &cluster->cpus); + + orig_max_freq = cluster->max_freq; + cluster->min_freq = policy->min; + cluster->max_freq = policy->max; + cluster->cur_freq = policy->cur; + + if (!cluster->freq_init_done) { + mutex_lock(&cluster_lock); + for_each_cpu(j, &cluster->cpus) + cpumask_copy(&cpu_rq(j)->freq_domain_cpumask, + policy->related_cpus); + cluster->max_possible_freq = policy->cpuinfo.max_freq; + cluster->max_possible_capacity = + compute_max_possible_capacity(cluster); + cluster->freq_init_done = true; + + sort_clusters(); + update_all_clusters_stats(); + mutex_unlock(&cluster_lock); + continue; + } + + update_capacity += (orig_max_freq != 
cluster->max_freq); + } + + if (update_capacity) + update_cpu_cluster_capacity(policy->related_cpus); + + return 0; +} + +static int cpufreq_notifier_trans(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data; + unsigned int cpu = freq->cpu, new_freq = freq->new; + unsigned long flags; + struct sched_cluster *cluster; + struct cpumask policy_cpus = cpu_rq(cpu)->freq_domain_cpumask; + int i, j; + + if (val != CPUFREQ_POSTCHANGE) + return 0; + + BUG_ON(!new_freq); + + if (cpu_cur_freq(cpu) == new_freq) + return 0; + + for_each_cpu(i, &policy_cpus) { + cluster = cpu_rq(i)->cluster; + + for_each_cpu(j, &cluster->cpus) { + struct rq *rq = cpu_rq(j); + + raw_spin_lock_irqsave(&rq->lock, flags); + update_task_ravg(rq->curr, rq, TASK_UPDATE, + sched_ktime_clock(), 0); + raw_spin_unlock_irqrestore(&rq->lock, flags); + } + + cluster->cur_freq = new_freq; + cpumask_andnot(&policy_cpus, &policy_cpus, &cluster->cpus); + } + + return 0; +} + +static int pwr_stats_ready_notifier(struct notifier_block *nb, + unsigned long cpu, void *data) +{ + cpumask_t mask = CPU_MASK_NONE; + + cpumask_set_cpu(cpu, &mask); + sched_update_freq_max_load(&mask); + + mutex_lock(&cluster_lock); + sort_clusters(); + mutex_unlock(&cluster_lock); + + return 0; +} + +static struct notifier_block notifier_policy_block = { + .notifier_call = cpufreq_notifier_policy +}; + +static struct notifier_block notifier_trans_block = { + .notifier_call = cpufreq_notifier_trans +}; + +static struct notifier_block notifier_pwr_stats_ready = { + .notifier_call = pwr_stats_ready_notifier +}; + +int __weak register_cpu_pwr_stats_ready_notifier(struct notifier_block *nb) +{ + return -EINVAL; +} + +static int register_sched_callback(void) +{ + int ret; + + ret = cpufreq_register_notifier(¬ifier_policy_block, + CPUFREQ_POLICY_NOTIFIER); + + if (!ret) + ret = cpufreq_register_notifier(¬ifier_trans_block, + CPUFREQ_TRANSITION_NOTIFIER); + + 
register_cpu_pwr_stats_ready_notifier(¬ifier_pwr_stats_ready); + + return 0; +} + +/* + * cpufreq callbacks can be registered at core_initcall or later time. + * Any registration done prior to that is "forgotten" by cpufreq. See + * initialization of variable init_cpufreq_transition_notifier_list_called + * for further information. + */ +core_initcall(register_sched_callback); + +int update_preferred_cluster(struct related_thread_group *grp, + struct task_struct *p, u32 old_load) +{ + u32 new_load = task_load(p); + + if (!grp) + return 0; + + /* + * Update if task's load has changed significantly or a complete window + * has passed since we last updated preference + */ + if (abs(new_load - old_load) > sched_ravg_window / 4 || + sched_ktime_clock() - grp->last_update > sched_ravg_window) + return 1; + + return 0; +} + +bool early_detection_notify(struct rq *rq, u64 wallclock) +{ + struct task_struct *p; + int loop_max = 10; + + if (sched_boost_policy() == SCHED_BOOST_NONE || !rq->cfs.h_nr_running) + return 0; + + rq->ed_task = NULL; + list_for_each_entry(p, &rq->cfs_tasks, se.group_node) { + if (!loop_max) + break; + + if (wallclock - p->last_wake_ts >= EARLY_DETECTION_DURATION) { + rq->ed_task = p; + return 1; + } + + loop_max--; + } + + return 0; +} + +void update_avg_burst(struct task_struct *p) +{ + update_avg(&p->ravg.avg_burst, p->ravg.curr_burst); + p->ravg.curr_burst = 0; +} + +void note_task_waking(struct task_struct *p, u64 wallclock) +{ + u64 sleep_time = wallclock - p->last_switch_out_ts; + + /* + * When a short burst and short sleeping task goes for a long + * sleep, the task's avg_sleep_time gets boosted. It will not + * come below short_sleep threshold for a lot of time and it + * results in incorrect packing. The idead behind tracking + * avg_sleep_time is to detect if a task is short sleeping + * or not. So limit the sleep time to twice the short sleep + * threshold. 
For regular long sleeping tasks, the avg_sleep_time + * would be higher than threshold, and packing happens correctly. + */ + sleep_time = min_t(u64, sleep_time, 2 * sysctl_sched_short_sleep); + update_avg(&p->ravg.avg_sleep_time, sleep_time); + + p->last_wake_ts = wallclock; +} + +#ifdef CONFIG_CGROUP_SCHED +u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct task_group *tg = css_tg(css); + + return tg->upmigrate_discouraged; +} + +int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css, + struct cftype *cft, u64 upmigrate_discourage) +{ + struct task_group *tg = css_tg(css); + int discourage = upmigrate_discourage > 0; + + if (tg->upmigrate_discouraged == discourage) + return 0; + + /* + * Revisit big-task classification for tasks of this cgroup. It would + * have been efficient to walk tasks of just this cgroup in running + * state, but we don't have easy means to do that. Walk all tasks in + * running state on all cpus instead and re-visit their big task + * classification. 
+ */ + get_online_cpus(); + pre_big_task_count_change(cpu_online_mask); + + tg->upmigrate_discouraged = discourage; + + post_big_task_count_change(cpu_online_mask); + put_online_cpus(); + + return 0; +} +#endif /* CONFIG_CGROUP_SCHED */ diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c index 33d7003fa1b8..d562efb04775 100644 --- a/kernel/sched/idle_task.c +++ b/kernel/sched/idle_task.c @@ -80,6 +80,26 @@ static void update_curr_idle(struct rq *rq) { } +#ifdef CONFIG_SCHED_HMP + +static void +inc_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p) +{ +} + +static void +dec_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p) +{ +} + +static void +fixup_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p, + u32 new_task_load, u32 new_pred_demand) +{ +} + +#endif + /* * Simple, special scheduling class for the per-CPU idle tasks: */ @@ -108,4 +128,9 @@ const struct sched_class idle_sched_class = { .prio_changed = prio_changed_idle, .switched_to = switched_to_idle, .update_curr = update_curr_idle, +#ifdef CONFIG_SCHED_HMP + .inc_hmp_sched_stats = inc_hmp_sched_stats_idle, + .dec_hmp_sched_stats = dec_hmp_sched_stats_idle, + .fixup_hmp_sched_stats = fixup_hmp_sched_stats_idle, +#endif }; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 69d36c945527..391ec29c71c0 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -8,9 +8,9 @@ #include #include #include +#include #include -#include "walt.h" #include "tune.h" int sched_rr_timeslice = RR_TIMESLICE; @@ -259,8 +259,12 @@ static void pull_rt_task(struct rq *this_rq); static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) { - /* Try to pull RT tasks here if we lower this rq's prio */ - return rq->rt.highest_prio.curr > prev->prio; + /* + * Try to pull RT tasks here if we lower this rq's prio and cpu is not + * isolated + */ + return rq->rt.highest_prio.curr > prev->prio && + !cpu_isolated(cpu_of(rq)); } static inline int rt_overloaded(struct rq *rq) @@ 
-1245,6 +1249,41 @@ void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {} #endif /* CONFIG_RT_GROUP_SCHED */ +#ifdef CONFIG_SCHED_HMP + +static void +inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) +{ + inc_cumulative_runnable_avg(&rq->hmp_stats, p); +} + +static void +dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) +{ + dec_cumulative_runnable_avg(&rq->hmp_stats, p); +} + +static void +fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p, + u32 new_task_load, u32 new_pred_demand) +{ + s64 task_load_delta = (s64)new_task_load - task_load(p); + s64 pred_demand_delta = PRED_DEMAND_DELTA; + + fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta, + pred_demand_delta); +} + +#else /* CONFIG_SCHED_HMP */ + +static inline void +inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { } + +static inline void +dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { } + +#endif /* CONFIG_SCHED_HMP */ + static inline unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se) { @@ -1406,15 +1445,11 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) { struct sched_rt_entity *rt_se = &p->rt; -#ifdef CONFIG_SMP - schedtune_enqueue_task(p, cpu_of(rq)); -#endif - if (flags & ENQUEUE_WAKEUP) rt_se->timeout = 0; enqueue_rt_entity(rt_se, flags); - walt_inc_cumulative_runnable_avg(rq, p); + inc_hmp_sched_stats_rt(rq, p); if (!task_current(rq, p) && p->nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); @@ -1451,13 +1486,9 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) { struct sched_rt_entity *rt_se = &p->rt; -#ifdef CONFIG_SMP - schedtune_dequeue_task(p, cpu_of(rq)); -#endif - update_curr_rt(rq); dequeue_rt_entity(rt_se, flags); - walt_dec_cumulative_runnable_avg(rq, p); + dec_hmp_sched_stats_rt(rq, p); dequeue_pushable_task(rq, p); @@ -1512,6 +1543,22 @@ static void yield_task_rt(struct rq *rq) #ifdef CONFIG_SMP static int find_lowest_rq(struct task_struct 
*task); +#ifdef CONFIG_SCHED_HMP +static int +select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags) +{ + int target; + + rcu_read_lock(); + target = find_lowest_rq(p); + if (target != -1) + cpu = target; + rcu_read_unlock(); + + return cpu; +} +#endif + /* * Return whether the task on the given cpu is currently non-preemptible * while handling a potentially long softint, or if the task is likely @@ -1564,6 +1611,10 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags, struct rq *rq; bool may_not_preempt; +#ifdef CONFIG_SCHED_HMP + return select_task_rq_rt_hmp(p, cpu, sd_flag, flags); +#endif + /* For anything but wake ups, just return the task_cpu */ if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK) goto out; @@ -1829,6 +1880,109 @@ static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu) static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); +#ifdef CONFIG_SCHED_HMP + +static int find_lowest_rq_hmp(struct task_struct *task) +{ + struct cpumask *lowest_mask = *this_cpu_ptr(&local_cpu_mask); + struct cpumask candidate_mask = CPU_MASK_NONE; + struct sched_cluster *cluster; + int best_cpu = -1; + int prev_cpu = task_cpu(task); + u64 cpu_load, min_load = ULLONG_MAX; + int i; + int restrict_cluster; + int boost_on_big; + int pack_task, wakeup_latency, least_wakeup_latency = INT_MAX; + + boost_on_big = sched_boost() == FULL_THROTTLE_BOOST && + sched_boost_policy() == SCHED_BOOST_ON_BIG; + + restrict_cluster = sysctl_sched_restrict_cluster_spill; + + /* Make sure the mask is initialized first */ + if (unlikely(!lowest_mask)) + return best_cpu; + + if (task->nr_cpus_allowed == 1) + return best_cpu; /* No other targets possible */ + + if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) + return best_cpu; /* No targets found */ + + pack_task = is_short_burst_task(task); + + /* + * At this point we have built a mask of cpus representing the + * lowest priority tasks in the system. 
Now we want to elect + * the best one based on our affinity and topology. + */ + +retry: + for_each_sched_cluster(cluster) { + if (boost_on_big && cluster->capacity != max_possible_capacity) + continue; + + cpumask_and(&candidate_mask, &cluster->cpus, lowest_mask); + cpumask_andnot(&candidate_mask, &candidate_mask, + cpu_isolated_mask); + /* + * When placement boost is active, if there is no eligible CPU + * in the highest capacity cluster, we fallback to the other + * clusters. So clear the CPUs of the traversed cluster from + * the lowest_mask. + */ + if (unlikely(boost_on_big)) + cpumask_andnot(lowest_mask, lowest_mask, + &cluster->cpus); + + if (cpumask_empty(&candidate_mask)) + continue; + + for_each_cpu(i, &candidate_mask) { + if (sched_cpu_high_irqload(i)) + continue; + + cpu_load = cpu_rq(i)->hmp_stats.cumulative_runnable_avg; + if (!restrict_cluster) + cpu_load = scale_load_to_cpu(cpu_load, i); + + if (pack_task) { + wakeup_latency = cpu_rq(i)->wakeup_latency; + + if (wakeup_latency > least_wakeup_latency) + continue; + + if (wakeup_latency < least_wakeup_latency) { + least_wakeup_latency = wakeup_latency; + min_load = cpu_load; + best_cpu = i; + continue; + } + } + + if (cpu_load < min_load || + (cpu_load == min_load && + (i == prev_cpu || (best_cpu != prev_cpu && + cpus_share_cache(prev_cpu, i))))) { + min_load = cpu_load; + best_cpu = i; + } + } + + if (restrict_cluster && best_cpu != -1) + break; + } + + if (unlikely(boost_on_big && best_cpu == -1)) { + boost_on_big = 0; + goto retry; + } + + return best_cpu; +} +#endif /* CONFIG_SCHED_HMP */ + static int find_lowest_rq(struct task_struct *task) { struct sched_domain *sd; @@ -1836,6 +1990,10 @@ static int find_lowest_rq(struct task_struct *task) int this_cpu = smp_processor_id(); int cpu = task_cpu(task); +#ifdef CONFIG_SCHED_HMP + return find_lowest_rq_hmp(task); +#endif + /* Make sure the mask is initialized first */ if (unlikely(!lowest_mask)) return -1; @@ -2407,7 +2565,8 @@ static void 
switched_from_rt(struct rq *rq, struct task_struct *p) * we may need to handle the pulling of RT tasks * now. */ - if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) + if (!task_on_rq_queued(p) || rq->rt.rt_nr_running || + cpu_isolated(cpu_of(rq))) return; queue_pull_task(rq); @@ -2596,6 +2755,11 @@ const struct sched_class rt_sched_class = { .switched_to = switched_to_rt, .update_curr = update_curr_rt, +#ifdef CONFIG_SCHED_HMP + .inc_hmp_sched_stats = inc_hmp_sched_stats_rt, + .dec_hmp_sched_stats = dec_hmp_sched_stats_rt, + .fixup_hmp_sched_stats = fixup_hmp_sched_stats_rt, +#endif }; #ifdef CONFIG_SCHED_DEBUG diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 2e006205f5d1..d7786c11b02f 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1,3 +1,8 @@ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2015 Sony Mobile Communications Inc, + * and licensed under the license of the file. + */ #include #include @@ -33,10 +38,8 @@ extern long calc_load_fold_active(struct rq *this_rq); #ifdef CONFIG_SMP extern void update_cpu_load_active(struct rq *this_rq); -extern void check_for_migration(struct rq *rq, struct task_struct *p); #else static inline void update_cpu_load_active(struct rq *this_rq) { } -static inline void check_for_migration(struct rq *rq, struct task_struct *p) { } #endif /* @@ -246,6 +249,10 @@ struct cfs_bandwidth { struct task_group { struct cgroup_subsys_state css; +#ifdef CONFIG_SCHED_HMP + bool upmigrate_discouraged; +#endif + #ifdef CONFIG_FAIR_GROUP_SCHED /* schedulable entities of this group on each cpu */ struct sched_entity **se; @@ -351,12 +358,96 @@ static inline void set_task_rq_fair(struct sched_entity *se, #endif /* CONFIG_SMP */ #endif /* CONFIG_FAIR_GROUP_SCHED */ +extern struct task_group *css_tg(struct cgroup_subsys_state *css); #else /* CONFIG_CGROUP_SCHED */ struct cfs_bandwidth { }; #endif /* CONFIG_CGROUP_SCHED */ +#ifdef CONFIG_SCHED_HMP + 
+#define NUM_TRACKED_WINDOWS 2 +#define NUM_LOAD_INDICES 1000 + +struct hmp_sched_stats { + int nr_big_tasks; + u64 cumulative_runnable_avg; + u64 pred_demands_sum; +}; + +struct load_subtractions { + u64 window_start; + u64 subs; + u64 new_subs; +}; + +struct group_cpu_time { + u64 curr_runnable_sum; + u64 prev_runnable_sum; + u64 nt_curr_runnable_sum; + u64 nt_prev_runnable_sum; +}; + +struct sched_cluster { + raw_spinlock_t load_lock; + struct list_head list; + struct cpumask cpus; + int id; + int max_power_cost; + int min_power_cost; + int max_possible_capacity; + int capacity; + int efficiency; /* Differentiate cpus with different IPC capability */ + int load_scale_factor; + unsigned int exec_scale_factor; + /* + * max_freq = user maximum + * max_mitigated_freq = thermal defined maximum + * max_possible_freq = maximum supported by hardware + */ + unsigned int cur_freq, max_freq, max_mitigated_freq, min_freq; + unsigned int max_possible_freq; + bool freq_init_done; + int dstate, dstate_wakeup_latency, dstate_wakeup_energy; + unsigned int static_cluster_pwr_cost; + atomic_t notifier_sent; + bool wake_up_idle; + atomic64_t last_cc_update; + atomic64_t cycles; +}; + +extern unsigned long all_cluster_ids[]; + +static inline int cluster_first_cpu(struct sched_cluster *cluster) +{ + return cpumask_first(&cluster->cpus); +} + +struct related_thread_group { + int id; + raw_spinlock_t lock; + struct list_head tasks; + struct list_head list; + struct sched_cluster *preferred_cluster; + struct rcu_head rcu; + u64 last_update; +}; + +extern struct list_head cluster_head; +extern struct sched_cluster *sched_cluster[NR_CPUS]; + +struct cpu_cycle { + u64 cycles; + u64 time; +}; + +#define for_each_sched_cluster(cluster) \ + list_for_each_entry_rcu(cluster, &cluster_head, list) + +extern unsigned int sched_disable_window_stats; +#endif /* CONFIG_SCHED_HMP */ + /* CFS-related fields in a runqueue */ struct cfs_rq { struct load_weight load; @@ -425,12 +516,11 @@ struct cfs_rq { 
struct list_head leaf_cfs_rq_list; struct task_group *tg; /* group that "owns" this runqueue */ -#ifdef CONFIG_SCHED_WALT - u64 cumulative_runnable_avg; -#endif - #ifdef CONFIG_CFS_BANDWIDTH +#ifdef CONFIG_SCHED_HMP + struct hmp_sched_stats hmp_stats; +#endif int runtime_enabled; u64 runtime_expires; @@ -615,6 +705,7 @@ struct rq { * remote CPUs use both these fields when doing load calculation. */ unsigned int nr_running; + unsigned int nr_pinned_tasks; #ifdef CONFIG_NUMA_BALANCING unsigned int nr_numa_running; unsigned int nr_preferred_running; @@ -701,6 +792,38 @@ struct rq { u64 max_idle_balance_cost; #endif +#ifdef CONFIG_SCHED_HMP + struct sched_cluster *cluster; + struct cpumask freq_domain_cpumask; + struct hmp_sched_stats hmp_stats; + + int cstate, wakeup_latency, wakeup_energy; + u64 window_start; + u64 load_reported_window; + unsigned long hmp_flags; + + u64 cur_irqload; + u64 avg_irqload; + u64 irqload_ts; + unsigned int static_cpu_pwr_cost; + struct task_struct *ed_task; + struct cpu_cycle cc; + u64 old_busy_time, old_busy_time_group; + u64 old_estimated_time; + u64 curr_runnable_sum; + u64 prev_runnable_sum; + u64 nt_curr_runnable_sum; + u64 nt_prev_runnable_sum; + struct group_cpu_time grp_time; + struct load_subtractions load_subs[NUM_TRACKED_WINDOWS]; + DECLARE_BITMAP_ARRAY(top_tasks_bitmap, + NUM_TRACKED_WINDOWS, NUM_LOAD_INDICES); + u8 *top_tasks[NUM_TRACKED_WINDOWS]; + u8 curr_table; + int prev_top; + int curr_top; +#endif + #ifdef CONFIG_SCHED_WALT u64 cumulative_runnable_avg; u64 window_start; @@ -994,6 +1117,644 @@ enum sched_boost_policy { SCHED_BOOST_ON_ALL, }; +#ifdef CONFIG_SCHED_HMP + +#define WINDOW_STATS_RECENT 0 +#define WINDOW_STATS_MAX 1 +#define WINDOW_STATS_MAX_RECENT_AVG 2 +#define WINDOW_STATS_AVG 3 +#define WINDOW_STATS_MAX_RECENT_WMA 4 +#define WINDOW_STATS_WMA 5 +#define WINDOW_STATS_MAX_RECENT_EWA 6 +#define WINDOW_STATS_EWA 7 +#define WINDOW_STATS_INVALID_POLICY 8 + +#define SCHED_UPMIGRATE_MIN_NICE 9 +#define 
EXITING_TASK_MARKER 0xdeaddead + +#define UP_MIGRATION 1 +#define DOWN_MIGRATION 2 +#define IRQLOAD_MIGRATION 3 + +extern struct mutex policy_mutex; +extern unsigned int sched_ravg_window; +extern unsigned int sched_disable_window_stats; +extern unsigned int max_possible_freq; +extern unsigned int min_max_freq; +extern unsigned int pct_task_load(struct task_struct *p); +extern unsigned int max_possible_efficiency; +extern unsigned int min_possible_efficiency; +extern unsigned int max_capacity; +extern unsigned int min_capacity; +extern unsigned int max_load_scale_factor; +extern unsigned int max_possible_capacity; +extern unsigned int min_max_possible_capacity; +extern unsigned int max_power_cost; +extern unsigned int sched_init_task_load_windows; +extern unsigned int up_down_migrate_scale_factor; +extern unsigned int sysctl_sched_restrict_cluster_spill; +extern unsigned int sched_pred_alert_load; +extern struct sched_cluster init_cluster; +extern unsigned int __read_mostly sched_short_sleep_task_threshold; +extern unsigned int __read_mostly sched_long_cpu_selection_threshold; +extern unsigned int __read_mostly sched_big_waker_task_load; +extern unsigned int __read_mostly sched_small_wakee_task_load; +extern unsigned int __read_mostly sched_spill_load; +extern unsigned int __read_mostly sched_upmigrate; +extern unsigned int __read_mostly sched_downmigrate; +extern unsigned int __read_mostly sched_load_granule; + +extern void init_new_task_load(struct task_struct *p); +extern u64 sched_ktime_clock(void); +extern int got_boost_kick(void); +extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb); +extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event, + u64 wallclock, u64 irqtime); +extern bool early_detection_notify(struct rq *rq, u64 wallclock); +extern void clear_ed_task(struct task_struct *p, struct rq *rq); +extern void fixup_busy_time(struct task_struct *p, int new_cpu); +extern void clear_boost_kick(int cpu); +extern 
void clear_hmp_request(int cpu); +extern void mark_task_starting(struct task_struct *p); +extern void set_window_start(struct rq *rq); +extern void update_cluster_topology(void); +extern void note_task_waking(struct task_struct *p, u64 wallclock); +extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock); +extern void init_clusters(void); +extern void reset_cpu_hmp_stats(int cpu, int reset_cra); +extern unsigned int max_task_load(void); +extern void sched_account_irqtime(int cpu, struct task_struct *curr, + u64 delta, u64 wallclock); +extern void sched_account_irqstart(int cpu, struct task_struct *curr, + u64 wallclock); +extern unsigned int cpu_temp(int cpu); +extern unsigned int nr_eligible_big_tasks(int cpu); +extern int update_preferred_cluster(struct related_thread_group *grp, + struct task_struct *p, u32 old_load); +extern void set_preferred_cluster(struct related_thread_group *grp); +extern void add_new_task_to_grp(struct task_struct *new); +extern unsigned int update_freq_aggregate_threshold(unsigned int threshold); +extern void update_avg_burst(struct task_struct *p); +extern void update_avg(u64 *avg, u64 sample); + +#define NO_BOOST 0 +#define FULL_THROTTLE_BOOST 1 +#define CONSERVATIVE_BOOST 2 +#define RESTRAINED_BOOST 3 + +static inline struct sched_cluster *cpu_cluster(int cpu) +{ + return cpu_rq(cpu)->cluster; +} + +static inline int cpu_capacity(int cpu) +{ + return cpu_rq(cpu)->cluster->capacity; +} + +static inline int cpu_max_possible_capacity(int cpu) +{ + return cpu_rq(cpu)->cluster->max_possible_capacity; +} + +static inline int cpu_load_scale_factor(int cpu) +{ + return cpu_rq(cpu)->cluster->load_scale_factor; +} + +static inline int cpu_efficiency(int cpu) +{ + return cpu_rq(cpu)->cluster->efficiency; +} + +static inline unsigned int cpu_cur_freq(int cpu) +{ + return cpu_rq(cpu)->cluster->cur_freq; +} + +static inline unsigned int cpu_min_freq(int cpu) +{ + return cpu_rq(cpu)->cluster->min_freq; +} + +static inline unsigned 
int cluster_max_freq(struct sched_cluster *cluster) +{ + /* + * Governor and thermal driver don't know the other party's mitigation + * voting. So struct cluster saves both and return min() for current + * cluster fmax. + */ + return min(cluster->max_mitigated_freq, cluster->max_freq); +} + +static inline unsigned int cpu_max_freq(int cpu) +{ + return cluster_max_freq(cpu_rq(cpu)->cluster); +} + +static inline unsigned int cpu_max_possible_freq(int cpu) +{ + return cpu_rq(cpu)->cluster->max_possible_freq; +} + +static inline int same_cluster(int src_cpu, int dst_cpu) +{ + return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster; +} + +static inline int cpu_max_power_cost(int cpu) +{ + return cpu_rq(cpu)->cluster->max_power_cost; +} + +static inline int cpu_min_power_cost(int cpu) +{ + return cpu_rq(cpu)->cluster->min_power_cost; +} + +static inline u32 cpu_cycles_to_freq(u64 cycles, u64 period) +{ + return div64_u64(cycles, period); +} + +static inline bool hmp_capable(void) +{ + return max_possible_capacity != min_max_possible_capacity; +} + +static inline bool is_max_capacity_cpu(int cpu) +{ + return cpu_max_possible_capacity(cpu) == max_possible_capacity; +} + +static inline bool is_min_capacity_cpu(int cpu) +{ + return cpu_max_possible_capacity(cpu) == min_max_possible_capacity; +} + +/* + * 'load' is in reference to "best cpu" at its best frequency. + * Scale that in reference to a given cpu, accounting for how bad it is + * in reference to "best cpu". + */ +static inline u64 scale_load_to_cpu(u64 task_load, int cpu) +{ + u64 lsf = cpu_load_scale_factor(cpu); + + if (lsf != 1024) { + task_load *= lsf; + task_load /= 1024; + } + + return task_load; +} + +static inline unsigned int task_load(struct task_struct *p) +{ + return p->ravg.demand; +} + +static inline void +inc_cumulative_runnable_avg(struct hmp_sched_stats *stats, + struct task_struct *p) +{ + u32 task_load; + + if (sched_disable_window_stats) + return; + + task_load = sched_disable_window_stats ? 
0 : p->ravg.demand; + + stats->cumulative_runnable_avg += task_load; + stats->pred_demands_sum += p->ravg.pred_demand; +} + +static inline void +dec_cumulative_runnable_avg(struct hmp_sched_stats *stats, + struct task_struct *p) +{ + u32 task_load; + + if (sched_disable_window_stats) + return; + + task_load = sched_disable_window_stats ? 0 : p->ravg.demand; + + stats->cumulative_runnable_avg -= task_load; + + BUG_ON((s64)stats->cumulative_runnable_avg < 0); + + stats->pred_demands_sum -= p->ravg.pred_demand; + BUG_ON((s64)stats->pred_demands_sum < 0); +} + +static inline void +fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats, + struct task_struct *p, s64 task_load_delta, + s64 pred_demand_delta) +{ + if (sched_disable_window_stats) + return; + + stats->cumulative_runnable_avg += task_load_delta; + BUG_ON((s64)stats->cumulative_runnable_avg < 0); + + stats->pred_demands_sum += pred_demand_delta; + BUG_ON((s64)stats->pred_demands_sum < 0); +} + +#define pct_to_real(tunable) \ + (div64_u64((u64)tunable * (u64)max_task_load(), 100)) + +#define real_to_pct(tunable) \ + (div64_u64((u64)tunable * (u64)100, (u64)max_task_load())) + +#define SCHED_HIGH_IRQ_TIMEOUT 3 +static inline u64 sched_irqload(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + s64 delta; + + delta = get_jiffies_64() - rq->irqload_ts; + /* + * Current context can be preempted by irq and rq->irqload_ts can be + * updated by irq context so that delta can be negative. + * But this is okay and we can safely return as this means there + * was recent irq occurrence. 
+ */ + + if (delta < SCHED_HIGH_IRQ_TIMEOUT) + return rq->avg_irqload; + else + return 0; +} + +static inline int sched_cpu_high_irqload(int cpu) +{ + return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload; +} + +static inline bool task_in_related_thread_group(struct task_struct *p) +{ + return !!(rcu_access_pointer(p->grp) != NULL); +} + +static inline +struct related_thread_group *task_related_thread_group(struct task_struct *p) +{ + return rcu_dereference(p->grp); +} + +#define PRED_DEMAND_DELTA ((s64)new_pred_demand - p->ravg.pred_demand) + +extern void +check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups); + +extern void notify_migration(int src_cpu, int dest_cpu, + bool src_cpu_dead, struct task_struct *p); + +/* Is frequency of two cpus synchronized with each other? */ +static inline int same_freq_domain(int src_cpu, int dst_cpu) +{ + struct rq *rq = cpu_rq(src_cpu); + + if (src_cpu == dst_cpu) + return 1; + + return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask); +} + +#define BOOST_KICK 0 +#define CPU_RESERVED 1 + +static inline int is_reserved(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + + return test_bit(CPU_RESERVED, &rq->hmp_flags); +} + +static inline int mark_reserved(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + + /* Name boost_flags as hmp_flags? */ + return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags); +} + +static inline void clear_reserved(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + + clear_bit(CPU_RESERVED, &rq->hmp_flags); +} + +static inline u64 cpu_cravg_sync(int cpu, int sync) +{ + struct rq *rq = cpu_rq(cpu); + u64 load; + + load = rq->hmp_stats.cumulative_runnable_avg; + + /* + * If load is being checked in a sync wakeup environment, + * we may want to discount the load of the currently running + * task. 
+ */ + if (sync && cpu == smp_processor_id()) { + if (load > rq->curr->ravg.demand) + load -= rq->curr->ravg.demand; + else + load = 0; + } + + return load; +} + +static inline bool is_short_burst_task(struct task_struct *p) +{ + return p->ravg.avg_burst < sysctl_sched_short_burst && + p->ravg.avg_sleep_time > sysctl_sched_short_sleep; +} + +extern void check_for_migration(struct rq *rq, struct task_struct *p); +extern void pre_big_task_count_change(const struct cpumask *cpus); +extern void post_big_task_count_change(const struct cpumask *cpus); +extern void set_hmp_defaults(void); +extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost); +extern unsigned int power_cost(int cpu, u64 demand); +extern void reset_all_window_stats(u64 window_start, unsigned int window_size); +extern int sched_boost(void); +extern int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu, + enum sched_boost_policy boost_policy); +extern enum sched_boost_policy sched_boost_policy(void); +extern int task_will_fit(struct task_struct *p, int cpu); +extern u64 cpu_load(int cpu); +extern u64 cpu_load_sync(int cpu, int sync); +extern int preferred_cluster(struct sched_cluster *cluster, + struct task_struct *p); +extern void inc_nr_big_task(struct hmp_sched_stats *stats, + struct task_struct *p); +extern void dec_nr_big_task(struct hmp_sched_stats *stats, + struct task_struct *p); +extern void inc_rq_hmp_stats(struct rq *rq, + struct task_struct *p, int change_cra); +extern void dec_rq_hmp_stats(struct rq *rq, + struct task_struct *p, int change_cra); +extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra); +extern int is_big_task(struct task_struct *p); +extern int upmigrate_discouraged(struct task_struct *p); +extern struct sched_cluster *rq_cluster(struct rq *rq); +extern int nr_big_tasks(struct rq *rq); +extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats, + struct task_struct *p, s64 delta); +extern void reset_task_stats(struct 
task_struct *p); +extern void reset_cfs_rq_hmp_stats(int cpu, int reset_cra); +extern void _inc_hmp_sched_stats_fair(struct rq *rq, + struct task_struct *p, int change_cra); +extern u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css, + struct cftype *cft); +extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css, + struct cftype *cft, u64 upmigrate_discourage); +extern void sched_boost_parse_dt(void); +extern void clear_top_tasks_bitmap(unsigned long *bitmap); + +#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE) +extern bool task_sched_boost(struct task_struct *p); +extern int sync_cgroup_colocation(struct task_struct *p, bool insert); +extern bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2); +extern void update_cgroup_boost_settings(void); +extern void restore_cgroup_boost_settings(void); + +#else +static inline bool +same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2) +{ + return true; +} + +static inline bool task_sched_boost(struct task_struct *p) +{ + return true; +} + +static inline void update_cgroup_boost_settings(void) { } +static inline void restore_cgroup_boost_settings(void) { } +#endif + +extern int alloc_related_thread_groups(void); + +#else /* CONFIG_SCHED_HMP */ + +struct hmp_sched_stats; +struct related_thread_group; +struct sched_cluster; + +static inline enum sched_boost_policy sched_boost_policy(void) +{ + return SCHED_BOOST_NONE; +} + +static inline bool task_sched_boost(struct task_struct *p) +{ + return true; +} + +static inline int got_boost_kick(void) +{ + return 0; +} + +static inline void update_task_ravg(struct task_struct *p, struct rq *rq, + int event, u64 wallclock, u64 irqtime) { } + +static inline bool early_detection_notify(struct rq *rq, u64 wallclock) +{ + return 0; +} + +static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { } +static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { } +static inline 
void clear_boost_kick(int cpu) { } +static inline void clear_hmp_request(int cpu) { } +static inline void mark_task_starting(struct task_struct *p) { } +static inline void set_window_start(struct rq *rq) { } +static inline void init_clusters(void) {} +static inline void update_cluster_topology(void) { } +static inline void note_task_waking(struct task_struct *p, u64 wallclock) { } +static inline void set_task_last_switch_out(struct task_struct *p, + u64 wallclock) { } + +static inline int task_will_fit(struct task_struct *p, int cpu) +{ + return 1; +} + +static inline int select_best_cpu(struct task_struct *p, int target, + int reason, int sync) +{ + return 0; +} + +static inline unsigned int power_cost(int cpu, u64 demand) +{ + return SCHED_CAPACITY_SCALE; +} + +static inline int sched_boost(void) +{ + return 0; +} + +static inline int is_big_task(struct task_struct *p) +{ + return 0; +} + +static inline int nr_big_tasks(struct rq *rq) +{ + return 0; +} + +static inline int is_cpu_throttling_imminent(int cpu) +{ + return 0; +} + +static inline int is_task_migration_throttled(struct task_struct *p) +{ + return 0; +} + +static inline unsigned int cpu_temp(int cpu) +{ + return 0; +} + +static inline void +inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { } + +static inline void +dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { } + +static inline void +inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p) { } + +static inline void +dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p) { } + +static inline int +preferred_cluster(struct sched_cluster *cluster, struct task_struct *p) +{ + return 1; +} + +static inline struct sched_cluster *rq_cluster(struct rq *rq) +{ + return NULL; +} + +static inline void init_new_task_load(struct task_struct *p) +{ +} + +static inline u64 scale_load_to_cpu(u64 load, int cpu) +{ + return load; +} + +static inline unsigned int nr_eligible_big_tasks(int cpu) +{ + return 0; 
+} + +static inline bool is_max_capacity_cpu(int cpu) { return true; } + +static inline int pct_task_load(struct task_struct *p) { return 0; } + +static inline int cpu_capacity(int cpu) +{ + return SCHED_LOAD_SCALE; +} + +static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; } + +static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats, + struct task_struct *p) +{ +} + +static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats, + struct task_struct *p) +{ +} + +static inline void sched_account_irqtime(int cpu, struct task_struct *curr, + u64 delta, u64 wallclock) +{ +} + +static inline void sched_account_irqstart(int cpu, struct task_struct *curr, + u64 wallclock) +{ +} + +static inline int sched_cpu_high_irqload(int cpu) { return 0; } + +static inline void set_preferred_cluster(struct related_thread_group *grp) { } + +static inline bool task_in_related_thread_group(struct task_struct *p) +{ + return false; +} + +static inline +struct related_thread_group *task_related_thread_group(struct task_struct *p) +{ + return NULL; +} + +static inline u32 task_load(struct task_struct *p) { return 0; } + +static inline int update_preferred_cluster(struct related_thread_group *grp, + struct task_struct *p, u32 old_load) +{ + return 0; +} + +static inline void add_new_task_to_grp(struct task_struct *new) {} + +#define PRED_DEMAND_DELTA (0) + +static inline void +check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { } + +static inline void notify_migration(int src_cpu, int dest_cpu, + bool src_cpu_dead, struct task_struct *p) { } + +static inline int same_freq_domain(int src_cpu, int dst_cpu) +{ + return 1; +} + +static inline void check_for_migration(struct rq *rq, struct task_struct *p) { } +static inline void pre_big_task_count_change(void) { } +static inline void post_big_task_count_change(void) { } +static inline void set_hmp_defaults(void) { } + +static inline void clear_reserved(int cpu) { } 
+static inline void sched_boost_parse_dt(void) {} +static inline int alloc_related_thread_groups(void) { return 0; } + +#define trace_sched_cpu_load(...) +#define trace_sched_cpu_load_lb(...) +#define trace_sched_cpu_load_cgroup(...) +#define trace_sched_cpu_load_wakeup(...) + +static inline void update_avg_burst(struct task_struct *p) {} + +#endif /* CONFIG_SCHED_HMP */ + /* * Returns the rq capacity of any rq in a group. This does not play * well with groups where rq capacity can change independently. @@ -1199,6 +1960,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ #define WF_FORK 0x02 /* child wakeup after fork */ #define WF_MIGRATED 0x4 /* internal use, task got migrated */ +#define WF_NO_NOTIFIER 0x08 /* do not notify governor */ /* * To aid in avoiding the subversion of "niceness" due to uneven distribution @@ -1275,7 +2037,6 @@ static const u32 prio_to_wmult[40] = { #define DEQUEUE_SLEEP 0x01 #define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */ #define DEQUEUE_MOVE 0x04 /* matches ENQUEUE_MOVE */ -#define DEQUEUE_IDLE 0x80 /* The last dequeue before IDLE */ #define ENQUEUE_WAKEUP 0x01 #define ENQUEUE_RESTORE 0x02 @@ -1355,6 +2116,12 @@ struct sched_class { #ifdef CONFIG_FAIR_GROUP_SCHED void (*task_change_group)(struct task_struct *p, int type); #endif +#ifdef CONFIG_SCHED_HMP + void (*inc_hmp_sched_stats)(struct rq *rq, struct task_struct *p); + void (*dec_hmp_sched_stats)(struct rq *rq, struct task_struct *p); + void (*fixup_hmp_sched_stats)(struct rq *rq, struct task_struct *p, + u32 new_task_load, u32 new_pred_demand); +#endif }; static inline void put_prev_task(struct rq *rq, struct task_struct *prev) @@ -1379,6 +2146,7 @@ extern void init_max_cpu_capacity(struct max_cpu_capacity *mcc); extern void update_group_capacity(struct sched_domain *sd, int cpu); extern void trigger_load_balance(struct rq *rq); +extern void nohz_balance_clear_nohz_mask(int cpu); 
extern void idle_enter_fair(struct rq *this_rq); extern void idle_exit_fair(struct rq *this_rq); @@ -1466,6 +2234,7 @@ static inline void __add_nr_running(struct rq *rq, unsigned count) { unsigned prev_nr = rq->nr_running; + sched_update_nr_prod(cpu_of(rq), count, true); rq->nr_running = prev_nr + count; if (prev_nr < 2 && rq->nr_running >= 2) { @@ -1492,6 +2261,7 @@ static inline void __add_nr_running(struct rq *rq, unsigned count) static inline void __sub_nr_running(struct rq *rq, unsigned count) { + sched_update_nr_prod(cpu_of(rq), count, false); rq->nr_running -= count; } @@ -1593,26 +2363,6 @@ unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu) } #endif -#ifndef arch_scale_max_freq_capacity -static __always_inline -unsigned long arch_scale_max_freq_capacity(struct sched_domain *sd, int cpu) -{ - return SCHED_CAPACITY_SCALE; -} -#endif - -#ifndef arch_scale_min_freq_capacity -static __always_inline -unsigned long arch_scale_min_freq_capacity(struct sched_domain *sd, int cpu) -{ - /* - * Multiplied with any capacity value, this scale factor will return - * 0, which represents an un-capped state - */ - return 0; -} -#endif - #ifndef arch_scale_cpu_capacity static __always_inline unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) @@ -1703,6 +2453,20 @@ static inline unsigned long cpu_util_freq(int cpu) #endif +#ifdef CONFIG_SCHED_HMP +/* + * HMP and EAS are orthogonal. Hopefully the compiler just elides out all code + * with the energy_aware() check, so that we don't even pay the comparison + * penalty at runtime. 
+ */ +#define energy_aware() false +#else +static inline bool energy_aware(void) +{ + return sched_feat(ENERGY_AWARE); +} +#endif + static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq)); @@ -2015,6 +2779,9 @@ enum rq_nohz_flag_bits { NOHZ_BALANCE_KICK, }; +#define NOHZ_KICK_ANY 0 +#define NOHZ_KICK_RESTRICT 1 + #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) #endif @@ -2096,6 +2863,18 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) { struct update_util_data *data; +#ifdef CONFIG_SCHED_HMP + /* + * Skip if we've already reported, but not if this is an inter-cluster + * migration + */ + if (!sched_disable_window_stats && + (rq->load_reported_window == rq->window_start) && + !(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG)) + return; + rq->load_reported_window = rq->window_start; +#endif + data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)); if (data) data->func(data, rq_clock(rq), flags); diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c new file mode 100644 index 000000000000..f03ed685f102 --- /dev/null +++ b/kernel/sched/sched_avg.c @@ -0,0 +1,199 @@ +/* Copyright (c) 2012, 2015-2017, 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +/* + * Scheduler hook for average runqueue determination + */ +#include +#include +#include +#include +#include + +#include "sched.h" +#include + +static DEFINE_PER_CPU(u64, nr_prod_sum); +static DEFINE_PER_CPU(u64, last_time); +static DEFINE_PER_CPU(u64, nr_big_prod_sum); +static DEFINE_PER_CPU(u64, nr); +static DEFINE_PER_CPU(u64, nr_max); + +static DEFINE_PER_CPU(unsigned long, iowait_prod_sum); +static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock); +static s64 last_get_time; + +#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y) +/** + * sched_get_nr_running_avg + * @return: Average nr_running, iowait and nr_big_tasks value since last poll. + * Returns the avg * 100 to return up to two decimal points + * of accuracy. + * + * Obtains the average nr_running value since the last poll. + * This function may not be called concurrently with itself + */ +void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg, + unsigned int *max_nr, unsigned int *big_max_nr) +{ + int cpu; + u64 curr_time = sched_clock(); + u64 diff = curr_time - last_get_time; + u64 tmp_avg = 0, tmp_iowait = 0, tmp_big_avg = 0; + + *avg = 0; + *iowait_avg = 0; + *big_avg = 0; + *max_nr = 0; + *big_max_nr = 0; + + if (!diff) + return; + + /* read and reset nr_running counts */ + for_each_possible_cpu(cpu) { + unsigned long flags; + + spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags); + curr_time = sched_clock(); + diff = curr_time - per_cpu(last_time, cpu); + BUG_ON((s64)diff < 0); + + tmp_avg += per_cpu(nr_prod_sum, cpu); + tmp_avg += per_cpu(nr, cpu) * diff; + + tmp_big_avg += per_cpu(nr_big_prod_sum, cpu); + tmp_big_avg += nr_eligible_big_tasks(cpu) * diff; + + tmp_iowait += per_cpu(iowait_prod_sum, cpu); + tmp_iowait += nr_iowait_cpu(cpu) * diff; + + per_cpu(last_time, cpu) = curr_time; + + per_cpu(nr_prod_sum, cpu) = 0; + per_cpu(nr_big_prod_sum, cpu) = 0; + per_cpu(iowait_prod_sum, cpu) = 0; + + if (*max_nr < per_cpu(nr_max, cpu)) + *max_nr = 
per_cpu(nr_max, cpu); + + if (is_max_capacity_cpu(cpu)) { + if (*big_max_nr < per_cpu(nr_max, cpu)) + *big_max_nr = per_cpu(nr_max, cpu); + } + + per_cpu(nr_max, cpu) = per_cpu(nr, cpu); + spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags); + } + + diff = curr_time - last_get_time; + last_get_time = curr_time; + + /* + * Any task running on BIG cluster and BIG tasks running on little + * cluster contributes to big_avg. Small or medium tasks can also + * run on BIG cluster when co-location and scheduler boost features + * are activated. We don't want these tasks to downmigrate to little + * cluster when BIG CPUs are available but isolated. Round up the + * average values so that core_ctl aggressively unisolate BIG CPUs. + */ + *avg = (int)DIV64_U64_ROUNDUP(tmp_avg, diff); + *big_avg = (int)DIV64_U64_ROUNDUP(tmp_big_avg, diff); + *iowait_avg = (int)DIV64_U64_ROUNDUP(tmp_iowait, diff); + + trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg, + *max_nr, *big_max_nr); + + BUG_ON(*avg < 0 || *big_avg < 0 || *iowait_avg < 0); + pr_debug("%s - avg:%d big_avg:%d iowait_avg:%d\n", + __func__, *avg, *big_avg, *iowait_avg); +} +EXPORT_SYMBOL(sched_get_nr_running_avg); + +static DEFINE_PER_CPU(atomic64_t, last_busy_time) = ATOMIC64_INIT(0); + +#define BUSY_NR_RUN 3 +#define BUSY_LOAD_FACTOR 10 + +#ifdef CONFIG_SCHED_HMP +static inline void update_last_busy_time(int cpu, bool dequeue, + unsigned long prev_nr_run, u64 curr_time) +{ + bool nr_run_trigger = false, load_trigger = false; + + if (!hmp_capable() || is_min_capacity_cpu(cpu)) + return; + + if (prev_nr_run >= BUSY_NR_RUN && per_cpu(nr, cpu) < BUSY_NR_RUN) + nr_run_trigger = true; + + if (dequeue) { + u64 load; + + load = cpu_rq(cpu)->hmp_stats.cumulative_runnable_avg; + load = scale_load_to_cpu(load, cpu); + + if (load * BUSY_LOAD_FACTOR > sched_ravg_window) + load_trigger = true; + } + + if (nr_run_trigger || load_trigger) + atomic64_set(&per_cpu(last_busy_time, cpu), curr_time); +} +#else +static inline void 
update_last_busy_time(int cpu, bool dequeue, + unsigned long prev_nr_run, u64 curr_time) +{ +} +#endif + +/** + * sched_update_nr_prod + * @cpu: The core id of the nr running driver. + * @delta: Adjust nr by 'delta' amount + * @inc: Whether we are increasing or decreasing the count + * @return: N/A + * + * Update average with latest nr_running value for CPU + */ +void sched_update_nr_prod(int cpu, long delta, bool inc) +{ + u64 diff; + u64 curr_time; + unsigned long flags, nr_running; + + spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags); + nr_running = per_cpu(nr, cpu); + curr_time = sched_clock(); + diff = curr_time - per_cpu(last_time, cpu); + BUG_ON((s64)diff < 0); + per_cpu(last_time, cpu) = curr_time; + per_cpu(nr, cpu) = nr_running + (inc ? delta : -delta); + + BUG_ON((s64)per_cpu(nr, cpu) < 0); + + if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu)) + per_cpu(nr_max, cpu) = per_cpu(nr, cpu); + + update_last_busy_time(cpu, !inc, nr_running, curr_time); + + per_cpu(nr_prod_sum, cpu) += nr_running * diff; + per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff; + per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff; + spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags); +} +EXPORT_SYMBOL(sched_update_nr_prod); + +u64 sched_get_cpu_last_busy_time(int cpu) +{ + return atomic64_read(&per_cpu(last_busy_time, cpu)); +} diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index a5567ccd8803..3278c81cefb1 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -1,5 +1,4 @@ #include "sched.h" -#include "walt.h" /* * stop-task scheduling class. 
@@ -19,6 +18,41 @@ select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags, } #endif /* CONFIG_SMP */ +#ifdef CONFIG_SCHED_HMP + +static void +inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) +{ + inc_cumulative_runnable_avg(&rq->hmp_stats, p); +} + +static void +dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) +{ + dec_cumulative_runnable_avg(&rq->hmp_stats, p); +} + +static void +fixup_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p, + u32 new_task_load, u32 new_pred_demand) +{ + s64 task_load_delta = (s64)new_task_load - task_load(p); + s64 pred_demand_delta = PRED_DEMAND_DELTA; + + fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta, + pred_demand_delta); +} + +#else /* CONFIG_SCHED_HMP */ + +static inline void +inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { } + +static inline void +dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { } + +#endif /* CONFIG_SCHED_HMP */ + static void check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) { @@ -44,14 +78,14 @@ static void enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) { add_nr_running(rq, 1); - walt_inc_cumulative_runnable_avg(rq, p); + inc_hmp_sched_stats_stop(rq, p); } static void dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) { sub_nr_running(rq, 1); - walt_dec_cumulative_runnable_avg(rq, p); + dec_hmp_sched_stats_stop(rq, p); } static void yield_task_stop(struct rq *rq) @@ -138,4 +172,9 @@ const struct sched_class stop_sched_class = { .prio_changed = prio_changed_stop, .switched_to = switched_to_stop, .update_curr = update_curr_stop, +#ifdef CONFIG_SCHED_HMP + .inc_hmp_sched_stats = inc_hmp_sched_stats_stop, + .dec_hmp_sched_stats = dec_hmp_sched_stats_stop, + .fixup_hmp_sched_stats = fixup_hmp_sched_stats_stop, +#endif }; diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c index 8a13404605bc..b84d13750604 100644 --- a/kernel/sched/tune.c +++ 
b/kernel/sched/tune.c @@ -121,6 +121,33 @@ struct schedtune { /* Boost value for tasks on that SchedTune CGroup */ int boost; +#ifdef CONFIG_SCHED_HMP + /* Toggle ability to override sched boost enabled */ + bool sched_boost_no_override; + + /* + * Controls whether a cgroup is eligible for sched boost or not. This + * can temporariliy be disabled by the kernel based on the no_override + * flag above. + */ + bool sched_boost_enabled; + + /* + * This tracks the default value of sched_boost_enabled and is used + * restore the value following any temporary changes to that flag. + */ + bool sched_boost_enabled_backup; + + /* + * Controls whether tasks of this cgroup should be colocated with each + * other and tasks of other cgroups that have the same flag turned on. + */ + bool colocate; + + /* Controls whether further updates are allowed to the colocate flag */ + bool colocate_update_disabled; +#endif + /* Performance Boost (B) region threshold params */ int perf_boost_idx; @@ -159,6 +186,13 @@ static inline struct schedtune *parent_st(struct schedtune *st) static struct schedtune root_schedtune = { .boost = 0, +#ifdef CONFIG_SCHED_HMP + .sched_boost_no_override = false, + .sched_boost_enabled = true, + .sched_boost_enabled_backup = true, + .colocate = false, + .colocate_update_disabled = false, +#endif .perf_boost_idx = 0, .perf_constrain_idx = 0, .prefer_idle = 0, @@ -239,6 +273,121 @@ struct boost_groups { /* Boost groups affecting each CPU in the system */ DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups); +#ifdef CONFIG_SCHED_HMP +static inline void init_sched_boost(struct schedtune *st) +{ + st->sched_boost_no_override = false; + st->sched_boost_enabled = true; + st->sched_boost_enabled_backup = st->sched_boost_enabled; + st->colocate = false; + st->colocate_update_disabled = false; +} + +bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2) +{ + return task_schedtune(tsk1) == task_schedtune(tsk2); +} + +void 
update_cgroup_boost_settings(void) +{ + int i; + + for (i = 0; i < BOOSTGROUPS_COUNT; i++) { + if (!allocated_group[i]) + break; + + if (allocated_group[i]->sched_boost_no_override) + continue; + + allocated_group[i]->sched_boost_enabled = false; + } +} + +void restore_cgroup_boost_settings(void) +{ + int i; + + for (i = 0; i < BOOSTGROUPS_COUNT; i++) { + if (!allocated_group[i]) + break; + + allocated_group[i]->sched_boost_enabled = + allocated_group[i]->sched_boost_enabled_backup; + } +} + +bool task_sched_boost(struct task_struct *p) +{ + struct schedtune *st = task_schedtune(p); + + return st->sched_boost_enabled; +} + +static u64 +sched_boost_override_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct schedtune *st = css_st(css); + + return st->sched_boost_no_override; +} + +static int sched_boost_override_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 override) +{ + struct schedtune *st = css_st(css); + + st->sched_boost_no_override = !!override; + + return 0; +} + +static u64 sched_boost_enabled_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct schedtune *st = css_st(css); + + return st->sched_boost_enabled; +} + +static int sched_boost_enabled_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 enable) +{ + struct schedtune *st = css_st(css); + + st->sched_boost_enabled = !!enable; + st->sched_boost_enabled_backup = st->sched_boost_enabled; + + return 0; +} + +static u64 sched_colocate_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct schedtune *st = css_st(css); + + return st->colocate; +} + +static int sched_colocate_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 colocate) +{ + struct schedtune *st = css_st(css); + + if (st->colocate_update_disabled) + return -EPERM; + + st->colocate = !!colocate; + st->colocate_update_disabled = true; + return 0; +} + +#else /* CONFIG_SCHED_HMP */ + +static inline void init_sched_boost(struct schedtune *st) { 
} + +#endif /* CONFIG_SCHED_HMP */ + static void schedtune_cpu_update(int cpu) { @@ -619,6 +768,22 @@ boost_write(struct cgroup_subsys_state *css, struct cftype *cft, return 0; } +static void schedtune_attach(struct cgroup_taskset *tset) +{ + struct task_struct *task; + struct cgroup_subsys_state *css; + struct schedtune *st; + bool colocate; + + cgroup_taskset_first(tset, &css); + st = css_st(css); + + colocate = st->colocate; + + cgroup_taskset_for_each(task, css, tset) + sync_cgroup_colocation(task, colocate); +} + static struct cftype files[] = { { .name = "boost", @@ -630,6 +795,23 @@ static struct cftype files[] = { .read_u64 = prefer_idle_read, .write_u64 = prefer_idle_write, }, +#ifdef CONFIG_SCHED_HMP + { + .name = "sched_boost_no_override", + .read_u64 = sched_boost_override_read, + .write_u64 = sched_boost_override_write, + }, + { + .name = "sched_boost_enabled", + .read_u64 = sched_boost_enabled_read, + .write_u64 = sched_boost_enabled_write, + }, + { + .name = "colocate", + .read_u64 = sched_colocate_read, + .write_u64 = sched_colocate_write, + }, +#endif { } /* terminate */ }; @@ -683,6 +865,7 @@ schedtune_css_alloc(struct cgroup_subsys_state *parent_css) /* Initialize per CPUs boost group support */ st->idx = idx; + init_sched_boost(st); if (schedtune_boostgroup_init(st)) goto release; @@ -720,6 +903,7 @@ struct cgroup_subsys schedtune_cgrp_subsys = { .cancel_attach = schedtune_cancel_attach, .legacy_cftypes = files, .early_init = 1, + .attach = schedtune_attach, }; static inline void @@ -915,7 +1099,8 @@ schedtune_init(void) */ sd = rcu_dereference(per_cpu(sd_ea, cpumask_first(cpu_online_mask))); if (!sd) { - pr_info("schedtune: no energy model data\n"); + if (energy_aware()) + pr_warn("schedtune: no energy model data\n"); goto nodata; } diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 675228037d12..f15d6b6a538a 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -10,7 +10,6 @@ #include #include #include -#include void 
__init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key) { @@ -157,13 +156,6 @@ void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) } EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ -void __wake_up_pollfree(wait_queue_head_t *wq_head) -{ - __wake_up(wq_head, TASK_NORMAL, 0, (void *)(POLLHUP | POLLFREE)); - /* POLLFREE must have cleared the queue. */ - WARN_ON_ONCE(waitqueue_active(wq_head)); -} - /* * Note: we use "set_current_state()" _after_ the wait-queue add, * because we need a memory barrier there on SMP, so that any diff --git a/kernel/signal.c b/kernel/signal.c index a699055ebfe8..6aa9ca45ebb1 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1823,6 +1823,16 @@ static inline int may_ptrace_stop(void) return 1; } +/* + * Return non-zero if there is a SIGKILL that should be waking us up. + * Called with the siglock held. + */ +static int sigkill_pending(struct task_struct *tsk) +{ + return sigismember(&tsk->pending.signal, SIGKILL) || + sigismember(&tsk->signal->shared_pending.signal, SIGKILL); +} + /* * This must be called with current->sighand->siglock held. * @@ -1848,10 +1858,15 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) * calling arch_ptrace_stop, so we must release it now. * To preserve proper semantics, we must do this before * any signal bookkeeping like checking group_stop_count. + * Meanwhile, a SIGKILL could come in before we retake the + * siglock. That must prevent us from sleeping in TASK_TRACED. + * So after regaining the lock, we must check for SIGKILL. 
*/ spin_unlock_irq(¤t->sighand->siglock); arch_ptrace_stop(exit_code, info); spin_lock_irq(¤t->sighand->siglock); + if (sigkill_pending(current)) + return; } /* @@ -1860,8 +1875,6 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) * Also, transition to TRACED and updates to ->jobctl should be * atomic with respect to siglock and should be done after the arch * hook as siglock is released and regrabbed across it. - * schedule() will not sleep if there is a pending signal that - * can awaken the task. */ set_current_state(TASK_TRACED); diff --git a/kernel/sys.c b/kernel/sys.c index d5ea3360038c..5a40f5c07054 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1776,6 +1776,13 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map) error = -EINVAL; + /* + * @brk should be after @end_data in traditional maps. + */ + if (prctl_map->start_brk <= prctl_map->end_data || + prctl_map->brk <= prctl_map->end_data) + goto out; + /* * Neither we should allow to override limits if they set. 
*/ diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 52f4297cbe58..fa543fd3a6db 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -138,6 +138,10 @@ static int one_hundred = 100; #ifdef CONFIG_PRINTK static int ten_thousand = 10000; #endif +#ifdef CONFIG_SCHED_HMP +static int one_thousand = 1000; +static int max_freq_reporting_policy = FREQ_REPORT_INVALID_POLICY - 1; +#endif /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE; @@ -224,28 +228,6 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write, #endif -#ifdef CONFIG_BPF_SYSCALL -static int bpf_unpriv_handler(struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos) -{ - int ret, unpriv_enable = *(int *)table->data; - bool locked_state = unpriv_enable == 1; - struct ctl_table tmp = *table; - - if (write && !capable(CAP_SYS_ADMIN)) - return -EPERM; - - tmp.data = &unpriv_enable; - ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); - if (write && !ret) { - if (locked_state && unpriv_enable != 1) - return -EPERM; - *(int *)table->data = unpriv_enable; - } - return ret; -} -#endif - static struct ctl_table kern_table[]; static struct ctl_table vm_table[]; static struct ctl_table fs_table[]; @@ -331,55 +313,236 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif -#ifdef CONFIG_SCHED_DEBUG +#ifdef CONFIG_SCHED_HMP { - .procname = "sched_min_granularity_ns", - .data = &sysctl_sched_min_granularity, + .procname = "sched_freq_reporting_policy", + .data = &sysctl_sched_freq_reporting_policy, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = sched_proc_update_handler, - .extra1 = &min_sched_granularity_ns, - .extra2 = &max_sched_granularity_ns, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &max_freq_reporting_policy, }, { - .procname = "sched_latency_ns", - .data = &sysctl_sched_latency, + .procname = "sched_freq_inc_notify", + 
.data = &sysctl_sched_freq_inc_notify, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = sched_proc_update_handler, - .extra1 = &min_sched_granularity_ns, - .extra2 = &max_sched_granularity_ns, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, }, -#ifdef CONFIG_SCHED_WALT { - .procname = "sched_use_walt_cpu_util", - .data = &sysctl_sched_use_walt_cpu_util, + .procname = "sched_freq_dec_notify", + .data = &sysctl_sched_freq_dec_notify, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, }, { - .procname = "sched_use_walt_task_util", - .data = &sysctl_sched_use_walt_task_util, + .procname = "sched_cpu_high_irqload", + .data = &sysctl_sched_cpu_high_irqload, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "sched_ravg_hist_size", + .data = &sysctl_sched_ravg_hist_size, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_window_update_handler, + }, + { + .procname = "sched_window_stats_policy", + .data = &sysctl_sched_window_stats_policy, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_window_update_handler, + }, + { + .procname = "sched_spill_load", + .data = &sysctl_sched_spill_load_pct, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = sched_hmp_proc_update_handler, + .extra1 = &zero, + .extra2 = &one_hundred, }, { - .procname = "sched_walt_init_task_load_pct", - .data = &sysctl_sched_walt_init_task_load_pct, + .procname = "sched_spill_nr_run", + .data = &sysctl_sched_spill_nr_run, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, }, { - .procname = "sched_walt_cpu_high_irqload", - .data = &sysctl_sched_walt_cpu_high_irqload, + .procname = "sched_upmigrate", + .data = &sysctl_sched_upmigrate_pct, + .maxlen = sizeof(unsigned 
int), + .mode = 0644, + .proc_handler = sched_hmp_proc_update_handler, + .extra1 = &zero, + .extra2 = &one_hundred, + }, + { + .procname = "sched_downmigrate", + .data = &sysctl_sched_downmigrate_pct, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_hmp_proc_update_handler, + .extra1 = &zero, + .extra2 = &one_hundred, + }, + { + .procname = "sched_group_upmigrate", + .data = &sysctl_sched_group_upmigrate_pct, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_hmp_proc_update_handler, + .extra1 = &zero, + }, + { + .procname = "sched_group_downmigrate", + .data = &sysctl_sched_group_downmigrate_pct, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_hmp_proc_update_handler, + .extra1 = &zero, + }, + { + .procname = "sched_init_task_load", + .data = &sysctl_sched_init_task_load_pct, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_hmp_proc_update_handler, + .extra1 = &zero, + .extra2 = &one_hundred, + }, + { + .procname = "sched_select_prev_cpu_us", + .data = &sysctl_sched_select_prev_cpu_us, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_hmp_proc_update_handler, + .extra1 = &zero, + }, + { + .procname = "sched_restrict_cluster_spill", + .data = &sysctl_sched_restrict_cluster_spill, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, + { + .procname = "sched_small_wakee_task_load", + .data = &sysctl_sched_small_wakee_task_load_pct, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_hmp_proc_update_handler, + .extra1 = &zero, + .extra2 = &one_hundred, + }, + { + .procname = "sched_big_waker_task_load", + .data = &sysctl_sched_big_waker_task_load_pct, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_hmp_proc_update_handler, + .extra1 = &zero, + .extra2 = &one_hundred, + }, + { + .procname = 
"sched_prefer_sync_wakee_to_waker", + .data = &sysctl_sched_prefer_sync_wakee_to_waker, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, + { + .procname = "sched_enable_thread_grouping", + .data = &sysctl_sched_enable_thread_grouping, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "sched_pred_alert_freq", + .data = &sysctl_sched_pred_alert_freq, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + }, + { + .procname = "sched_freq_aggregate", + .data = &sysctl_sched_freq_aggregate, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_window_update_handler, + }, + { + .procname = "sched_freq_aggregate_threshold", + .data = &sysctl_sched_freq_aggregate_threshold_pct, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_hmp_proc_update_handler, + .extra1 = &zero, + /* + * Special handling for sched_freq_aggregate_threshold_pct + * which can be greater than 100. Use 1000 as an upper bound + * value which works for all practical use cases. 
+ */ + .extra2 = &one_thousand, + }, + { + .procname = "sched_boost", + .data = &sysctl_sched_boost, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_boost_handler, + .extra1 = &zero, + .extra2 = &three, + }, + { + .procname = "sched_short_burst_ns", + .data = &sysctl_sched_short_burst, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, -#endif + { + .procname = "sched_short_sleep_ns", + .data = &sysctl_sched_short_sleep, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, +#endif /* CONFIG_SCHED_HMP */ +#ifdef CONFIG_SCHED_DEBUG + { + .procname = "sched_min_granularity_ns", + .data = &sysctl_sched_min_granularity, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_proc_update_handler, + .extra1 = &min_sched_granularity_ns, + .extra2 = &max_sched_granularity_ns, + }, + { + .procname = "sched_latency_ns", + .data = &sysctl_sched_latency, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_proc_update_handler, + .extra1 = &min_sched_granularity_ns, + .extra2 = &max_sched_granularity_ns, + }, { .procname = "sched_sync_hint_enable", .data = &sysctl_sched_sync_hint_enable, @@ -1286,9 +1449,10 @@ static struct ctl_table kern_table[] = { .data = &sysctl_unprivileged_bpf_disabled, .maxlen = sizeof(sysctl_unprivileged_bpf_disabled), .mode = 0644, - .proc_handler = bpf_unpriv_handler, - .extra1 = &zero, - .extra2 = &two, + /* only handle a transition from default "0" to "1" */ + .proc_handler = proc_dointvec_minmax, + .extra1 = &one, + .extra2 = &one, }, #endif #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 130d4d5e2251..dc20e85d2e11 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1008,7 +1008,8 @@ int do_settimeofday64(const struct timespec64 *ts) timekeeping_forward_now(tk); xt = tk_xtime(tk); - ts_delta = timespec64_sub(*ts, xt); + 
ts_delta.tv_sec = ts->tv_sec - xt.tv_sec; + ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec; if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) { ret = -EINVAL; diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 7f9e09d1bed3..243c1b89eb90 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -1633,14 +1633,6 @@ static int blk_trace_remove_queue(struct request_queue *q) if (bt == NULL) return -EINVAL; - if (bt->trace_state == Blktrace_running) { - bt->trace_state = Blktrace_stopped; - spin_lock_irq(&running_trace_lock); - list_del_init(&bt->running_list); - spin_unlock_irq(&running_trace_lock); - relay_flush(bt->rchan); - } - put_probe_ref(); synchronize_rcu(); blk_trace_free(bt); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index c5484723abda..89ed01911a9a 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1943,18 +1943,12 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, static void print_ip_ins(const char *fmt, unsigned char *p) { - char ins[MCOUNT_INSN_SIZE]; int i; - if (probe_kernel_read(ins, p, MCOUNT_INSN_SIZE)) { - printk(KERN_CONT "%s[FAULT] %px\n", fmt, p); - return; - } - printk(KERN_CONT "%s", fmt); for (i = 0; i < MCOUNT_INSN_SIZE; i++) - printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]); + printk(KERN_CONT "%s%02x", i ? 
":" : "", p[i]); } static struct ftrace_ops * @@ -4407,11 +4401,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file) parser = &iter->parser; if (trace_parser_loaded(parser)) { - int enable = !(iter->flags & FTRACE_ITER_NOTRACE); - parser->buffer[parser->idx] = 0; - ftrace_process_regex(iter->hash, parser->buffer, - parser->idx, enable); + ftrace_match_records(iter->hash, parser->buffer, parser->idx); } trace_parser_put(parser); @@ -5185,7 +5176,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op; int bit; - bit = trace_test_and_set_recursion(TRACE_LIST_START); + bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); if (bit < 0) return; @@ -5246,7 +5237,7 @@ static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip, { int bit; - bit = trace_test_and_set_recursion(TRACE_LIST_START); + bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); if (bit < 0) return; @@ -5717,6 +5708,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) } if (t->ret_stack == NULL) { + atomic_set(&t->tracing_graph_pause, 0); atomic_set(&t->trace_overrun, 0); t->curr_ret_stack = -1; /* Make sure the tasks see the -1 first: */ @@ -5928,6 +5920,7 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); static void graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) { + atomic_set(&t->tracing_graph_pause, 0); atomic_set(&t->trace_overrun, 0); t->ftrace_timestamp = 0; /* make curr_ret_stack visible before we add the ret_stack */ diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 19b30ff90cc4..547a3a5ac57b 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -3086,30 +3086,10 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) if (unlikely(!head)) return true; - /* Reader should exhaust content in reader page */ - if (reader->read != rb_page_commit(reader)) - 
return false; - - /* - * If writers are committing on the reader page, knowing all - * committed content has been read, the ring buffer is empty. - */ - if (commit == reader) - return true; - - /* - * If writers are committing on a page other than reader page - * and head page, there should always be content to read. - */ - if (commit != head) - return false; - - /* - * Writers are committing on the head page, we just need - * to care about there're committed data, and the reader will - * swap reader page with head page when it is to read data. - */ - return rb_page_commit(commit) == 0; + return reader->read == rb_page_commit(reader) && + (commit == reader || + (commit == head && + head->read == rb_page_commit(commit))); } /** @@ -4314,8 +4294,6 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) if (!cpumask_test_cpu(cpu, buffer->cpumask)) return; - /* prevent another thread from changing buffer sizes */ - mutex_lock(&buffer->mutex); atomic_inc(&buffer->resize_disabled); atomic_inc(&cpu_buffer->record_disabled); @@ -4339,8 +4317,6 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) atomic_dec(&cpu_buffer->record_disabled); atomic_dec(&buffer->resize_disabled); - - mutex_unlock(&buffer->mutex); } EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 38b8ff7a4c46..c068b66641af 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -240,10 +239,6 @@ __setup("trace_clock=", set_trace_boot_clock); static int __init set_tracepoint_printk(char *str) { - /* Ignore the "tp_printk_stop_on_boot" param */ - if (*str == '_') - return 0; - if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) tracepoint_printk = 1; return 1; @@ -1385,6 +1380,9 @@ struct saved_cmdlines_buffer { }; static struct saved_cmdlines_buffer *savedcmd; +/* temporary disable recording */ +static atomic_t 
trace_record_cmdline_disabled __read_mostly; + static inline char *get_saved_cmdlines(int idx) { return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN]; @@ -1586,13 +1584,10 @@ void trace_stop_cmdline_recording(void); static int trace_save_cmdline(struct task_struct *tsk) { - unsigned tpid, idx; + unsigned pid, idx; - /* treat recording of idle task as a success */ - if (!tsk->pid) - return 1; - - tpid = tsk->pid & (PID_MAX_DEFAULT - 1); + if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT)) + return 0; preempt_disable(); /* @@ -1606,15 +1601,26 @@ static int trace_save_cmdline(struct task_struct *tsk) return 0; } - idx = savedcmd->map_pid_to_cmdline[tpid]; + idx = savedcmd->map_pid_to_cmdline[tsk->pid]; if (idx == NO_CMDLINE_MAP) { idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num; - savedcmd->map_pid_to_cmdline[tpid] = idx; + /* + * Check whether the cmdline buffer at idx has a pid + * mapped. We are going to overwrite that entry so we + * need to clear the map_pid_to_cmdline. Otherwise we + * would read the new comm for the old pid. 
+ */ + pid = savedcmd->map_cmdline_to_pid[idx]; + if (pid != NO_CMDLINE_MAP) + savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP; + + savedcmd->map_cmdline_to_pid[idx] = tsk->pid; + savedcmd->map_pid_to_cmdline[tsk->pid] = idx; + savedcmd->cmdline_idx = idx; } - savedcmd->map_cmdline_to_pid[idx] = tsk->pid; set_cmdline(idx, tsk->comm); savedcmd->map_cmdline_to_tgid[idx] = tsk->tgid; arch_spin_unlock(&trace_cmdline_lock); @@ -1626,7 +1632,6 @@ static int trace_save_cmdline(struct task_struct *tsk) static void __trace_find_cmdline(int pid, char comm[]) { unsigned map; - int tpid; if (!pid) { strcpy(comm, ""); @@ -1638,14 +1643,9 @@ static void __trace_find_cmdline(int pid, char comm[]) return; } - tpid = pid & (PID_MAX_DEFAULT - 1); - map = savedcmd->map_pid_to_cmdline[tpid]; - if (map != NO_CMDLINE_MAP) { - tpid = savedcmd->map_cmdline_to_pid[map]; - if (tpid == pid) { - strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN); - return; - } + if (pid > PID_MAX_DEFAULT) { + strcpy(comm, "<...>"); + return; } map = savedcmd->map_pid_to_cmdline[pid]; @@ -1697,6 +1697,9 @@ int trace_find_tgid(int pid) void tracing_record_cmdline(struct task_struct *tsk) { + if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on()) + return; + if (!__this_cpu_read(trace_cmdline_save)) return; @@ -1759,7 +1762,7 @@ void trace_buffer_unlock_commit(struct trace_array *tr, __buffer_unlock_commit(buffer, event); ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); - ftrace_trace_userstack(tr, buffer, flags, pc); + ftrace_trace_userstack(buffer, flags, pc); } EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit); @@ -1821,7 +1824,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr, * two. They are that meaningful. */ ftrace_trace_stack(tr, buffer, flags, regs ? 
0 : 4, pc, regs); - ftrace_trace_userstack(tr, buffer, flags, pc); + ftrace_trace_userstack(buffer, flags, pc); } EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs); @@ -1920,8 +1923,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, size *= sizeof(unsigned long); event = trace_buffer_lock_reserve(buffer, TRACE_STACK, - (sizeof(*entry) - sizeof(entry->caller)) + size, - flags, pc); + sizeof(*entry) + size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); @@ -1995,15 +1997,14 @@ void trace_dump_stack(int skip) static DEFINE_PER_CPU(int, user_stack_count); void -ftrace_trace_userstack(struct trace_array *tr, - struct ring_buffer *buffer, unsigned long flags, int pc) +ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) { struct trace_event_call *call = &event_user_stack; struct ring_buffer_event *event; struct userstack_entry *entry; struct stack_trace trace; - if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) + if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE)) return; /* @@ -2566,6 +2567,9 @@ static void *s_start(struct seq_file *m, loff_t *pos) return ERR_PTR(-EBUSY); #endif + if (!iter->snapshot) + atomic_inc(&trace_record_cmdline_disabled); + if (*pos != iter->pos) { iter->ent = NULL; iter->cpu = 0; @@ -2608,6 +2612,9 @@ static void s_stop(struct seq_file *m, void *p) return; #endif + if (!iter->snapshot) + atomic_dec(&trace_record_cmdline_disabled); + trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); } @@ -6789,19 +6796,6 @@ static int allocate_trace_buffers(struct trace_array *tr, int size) */ allocate_snapshot = false; #endif - - /* - * Because of some magic with the way alloc_percpu() works on - * x86_64, we need to synchronize the pgd of all the tables, - * otherwise the trace events that happen in x86_64 page fault - * handlers can't cope with accessing the chance that a - * alloc_percpu()'d memory might be touched in the page fault trace - * event. 
Oh, and we need to audit all other alloc_percpu() and vmalloc() - * calls in tracing, because something might get triggered within a - * page fault trace event! - */ - vmalloc_sync_mappings(); - return 0; } diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 53e8ccbae818..6f9534635129 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -431,8 +431,23 @@ struct tracer { * When function tracing occurs, the following steps are made: * If arch does not support a ftrace feature: * call internal function (uses INTERNAL bits) which calls... + * If callback is registered to the "global" list, the list + * function is called and recursion checks the GLOBAL bits. + * then this function calls... * The function callback, which can use the FTRACE bits to * check for recursion. + * + * Now if the arch does not suppport a feature, and it calls + * the global list function which calls the ftrace callback + * all three of these steps will do a recursion protection. + * There's no reason to do one if the previous caller already + * did. The recursion that we are protecting against will + * go through the same steps again. + * + * To prevent the multiple recursion checks, if a recursion + * bit is set that is higher than the MAX bit of the current + * check, then we know that the check was made by the previous + * caller, and we can skip the current check. */ enum { TRACE_BUFFER_BIT, @@ -445,14 +460,12 @@ enum { TRACE_FTRACE_NMI_BIT, TRACE_FTRACE_IRQ_BIT, TRACE_FTRACE_SIRQ_BIT, - TRACE_FTRACE_TRANSITION_BIT, - /* Internal use recursion bits */ + /* INTERNAL_BITs must be greater than FTRACE_BITs */ TRACE_INTERNAL_BIT, TRACE_INTERNAL_NMI_BIT, TRACE_INTERNAL_IRQ_BIT, TRACE_INTERNAL_SIRQ_BIT, - TRACE_INTERNAL_TRANSITION_BIT, TRACE_CONTROL_BIT, @@ -465,6 +478,12 @@ enum { * can only be modified by current, we can reuse trace_recursion. */ TRACE_IRQ_BIT, + + /* + * When transitioning between context, the preempt_count() may + * not be correct. 
Allow for a single recursion to cover this case. + */ + TRACE_TRANSITION_BIT, }; #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) @@ -474,18 +493,12 @@ enum { #define TRACE_CONTEXT_BITS 4 #define TRACE_FTRACE_START TRACE_FTRACE_BIT +#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1) #define TRACE_LIST_START TRACE_INTERNAL_BIT +#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1) -#define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1) - -enum { - TRACE_CTX_NMI, - TRACE_CTX_IRQ, - TRACE_CTX_SOFTIRQ, - TRACE_CTX_NORMAL, - TRACE_CTX_TRANSITION, -}; +#define TRACE_CONTEXT_MASK TRACE_LIST_MAX static __always_inline int trace_get_context_bit(void) { @@ -493,48 +506,59 @@ static __always_inline int trace_get_context_bit(void) if (in_interrupt()) { if (in_nmi()) - bit = TRACE_CTX_NMI; + bit = 0; else if (in_irq()) - bit = TRACE_CTX_IRQ; + bit = 1; else - bit = TRACE_CTX_SOFTIRQ; + bit = 2; } else - bit = TRACE_CTX_NORMAL; + bit = 3; return bit; } -static __always_inline int trace_test_and_set_recursion(int start) +static __always_inline int trace_test_and_set_recursion(int start, int max) { unsigned int val = current->trace_recursion; int bit; + /* A previous recursion check was made */ + if ((val & TRACE_CONTEXT_MASK) > max) + return 0; + bit = trace_get_context_bit() + start; if (unlikely(val & (1 << bit))) { /* * It could be that preempt_count has not been updated during * a switch between contexts. Allow for a single recursion. 
*/ - bit = start + TRACE_CTX_TRANSITION; + bit = TRACE_TRANSITION_BIT; if (trace_recursion_test(bit)) return -1; trace_recursion_set(bit); barrier(); - return bit; + return bit + 1; } + /* Normal check passed, clear the transition to allow it again */ + trace_recursion_clear(TRACE_TRANSITION_BIT); + val |= 1 << bit; current->trace_recursion = val; barrier(); - return bit; + return bit + 1; } static __always_inline void trace_clear_recursion(int bit) { unsigned int val = current->trace_recursion; + if (!bit) + return; + + bit--; bit = 1 << bit; val &= ~bit; @@ -632,15 +656,13 @@ void update_max_tr_single(struct trace_array *tr, #endif /* CONFIG_TRACER_MAX_TRACE */ #ifdef CONFIG_STACKTRACE -void ftrace_trace_userstack(struct trace_array *tr, - struct ring_buffer *buffer, unsigned long flags, +void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc); void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, int pc); #else -static inline void ftrace_trace_userstack(struct trace_array *tr, - struct ring_buffer *buffer, +static inline void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) { } diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index b70233a9563f..0f06532a755b 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -93,49 +93,33 @@ u64 notrace trace_clock_global(void) { unsigned long flags; int this_cpu; - u64 now, prev_time; + u64 now; local_irq_save(flags); this_cpu = raw_smp_processor_id(); - + now = sched_clock_cpu(this_cpu); /* - * The global clock "guarantees" that the events are ordered - * between CPUs. But if two events on two different CPUS call - * trace_clock_global at roughly the same time, it really does - * not matter which one gets the earlier time. Just make sure - * that the same CPU will always show a monotonic clock. - * - * Use a read memory barrier to get the latest written - * time that was recorded. 
+ * If in an NMI context then dont risk lockups and return the + * cpu_clock() time: */ - smp_rmb(); - prev_time = READ_ONCE(trace_clock_struct.prev_time); - now = sched_clock_cpu(this_cpu); + if (unlikely(in_nmi())) + goto out; - /* Make sure that now is always greater than or equal to prev_time */ - if ((s64)(now - prev_time) < 0) - now = prev_time; + arch_spin_lock(&trace_clock_struct.lock); /* - * If in an NMI context then dont risk lockups and simply return - * the current time. + * TODO: if this happens often then maybe we should reset + * my_scd->clock to prev_time+1, to make sure + * we start ticking with the local clock from now on? */ - if (unlikely(in_nmi())) - goto out; + if ((s64)(now - trace_clock_struct.prev_time) < 0) + now = trace_clock_struct.prev_time + 1; - /* Tracing can cause strange recursion, always use a try lock */ - if (arch_spin_trylock(&trace_clock_struct.lock)) { - /* Reread prev_time in case it was already updated */ - prev_time = READ_ONCE(trace_clock_struct.prev_time); - if ((s64)(now - prev_time) < 0) - now = prev_time; + trace_clock_struct.prev_time = now; - trace_clock_struct.prev_time = now; + arch_spin_unlock(&trace_clock_struct.lock); - /* The unlock acts as the wmb for the above rmb */ - arch_spin_unlock(&trace_clock_struct.lock); - } out: local_irq_restore(flags); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 15ca669ea5d5..faeb44077cd8 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1084,8 +1084,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, mutex_lock(&event_mutex); list_for_each_entry(file, &tr->events, list) { call = file->event_call; - if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) || - !trace_event_name(call) || !call->class || !call->class->reg) + if (!trace_event_name(call) || !call->class || !call->class->reg) continue; if (system && strcmp(call->class->system, system->name) != 0) @@ -2342,19 +2341,12 @@ static struct 
trace_event_file * trace_create_new_event(struct trace_event_call *call, struct trace_array *tr) { - struct trace_pid_list *pid_list; struct trace_event_file *file; file = kmem_cache_alloc(file_cachep, GFP_TRACE); if (!file) return NULL; - pid_list = rcu_dereference_protected(tr->filtered_pids, - lockdep_is_held(&event_mutex)); - - if (pid_list) - file->flags |= EVENT_FILE_FL_PID_FILTER; - file->event_call = call; file->tr = tr; atomic_set(&file->sm_ref, 0); diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 7adbfcf555fd..fcd41a166405 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -137,7 +137,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, pc = preempt_count(); preempt_disable_notrace(); - bit = trace_test_and_set_recursion(TRACE_FTRACE_START); + bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX); if (bit < 0) goto out; diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index a1f9be703002..eda85bbf1c2e 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -59,12 +59,6 @@ struct tp_probes { struct tracepoint_func probes[0]; }; -/* Called in removal of a func but failed to allocate a new tp_funcs */ -static void tp_stub_func(void) -{ - return; -} - static inline void *allocate_probes(int count) { struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func) @@ -103,7 +97,6 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func, { struct tracepoint_func *old, *new; int nr_probes = 0; - int stub_funcs = 0; int pos = -1; if (WARN_ON(!tp_func->func)) @@ -120,34 +113,14 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func, if (old[nr_probes].func == tp_func->func && old[nr_probes].data == tp_func->data) return ERR_PTR(-EEXIST); - if (old[nr_probes].func == tp_stub_func) - stub_funcs++; } } - /* + 2 : one for new probe, one for NULL func - stub functions */ - new = allocate_probes(nr_probes + 2 - 
stub_funcs); + /* + 2 : one for new probe, one for NULL func */ + new = allocate_probes(nr_probes + 2); if (new == NULL) return ERR_PTR(-ENOMEM); if (old) { - if (stub_funcs) { - /* Need to copy one at a time to remove stubs */ - int probes = 0; - - pos = -1; - for (nr_probes = 0; old[nr_probes].func; nr_probes++) { - if (old[nr_probes].func == tp_stub_func) - continue; - if (pos < 0 && old[nr_probes].prio < prio) - pos = probes++; - new[probes++] = old[nr_probes]; - } - nr_probes = probes; - if (pos < 0) - pos = probes; - else - nr_probes--; /* Account for insertion */ - - } else if (pos < 0) { + if (pos < 0) { pos = nr_probes; memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); } else { @@ -181,9 +154,8 @@ static void *func_remove(struct tracepoint_func **funcs, /* (N -> M), (N > 1, M >= 0) probes */ if (tp_func->func) { for (nr_probes = 0; old[nr_probes].func; nr_probes++) { - if ((old[nr_probes].func == tp_func->func && - old[nr_probes].data == tp_func->data) || - old[nr_probes].func == tp_stub_func) + if (old[nr_probes].func == tp_func->func && + old[nr_probes].data == tp_func->data) nr_del++; } } @@ -202,32 +174,14 @@ static void *func_remove(struct tracepoint_func **funcs, /* N -> M, (N > 1, M > 0) */ /* + 1 for NULL */ new = allocate_probes(nr_probes - nr_del + 1); - if (new) { - for (i = 0; old[i].func; i++) - if ((old[i].func != tp_func->func - || old[i].data != tp_func->data) - && old[i].func != tp_stub_func) - new[j++] = old[i]; - new[nr_probes - nr_del].func = NULL; - *funcs = new; - } else { - /* - * Failed to allocate, replace the old function - * with calls to tp_stub_func. - */ - for (i = 0; old[i].func; i++) - if (old[i].func == tp_func->func && - old[i].data == tp_func->data) { - old[i].func = tp_stub_func; - /* Set the prio to the next event. 
*/ - if (old[i + 1].func) - old[i].prio = - old[i + 1].prio; - else - old[i].prio = -1; - } - *funcs = old; - } + if (new == NULL) + return ERR_PTR(-ENOMEM); + for (i = 0; old[i].func; i++) + if (old[i].func != tp_func->func + || old[i].data != tp_func->data) + new[j++] = old[i]; + new[nr_probes - nr_del].func = NULL; + *funcs = new; } debug_print_probes(*funcs); return old; @@ -280,12 +234,10 @@ static int tracepoint_remove_func(struct tracepoint *tp, tp_funcs = rcu_dereference_protected(tp->funcs, lockdep_is_held(&tracepoints_mutex)); old = func_remove(&tp_funcs, func); - if (WARN_ON_ONCE(IS_ERR(old))) + if (IS_ERR(old)) { + WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM); return PTR_ERR(old); - - if (tp_funcs == old) - /* Failed allocating new tp_funcs, replaced func with stub */ - return 0; + } if (!tp_funcs) { /* Removed last function */ diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 2478ba0d91a0..975cb49e32bf 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -44,10 +44,11 @@ void bacct_add_tsk(struct user_namespace *user_ns, /* Convert to seconds for btime */ do_div(delta, USEC_PER_SEC); stats->ac_btime = get_seconds() - delta; - if (tsk->flags & PF_EXITING) + if (thread_group_leader(tsk)) { stats->ac_exitcode = tsk->exit_code; - if (thread_group_leader(tsk) && (tsk->flags & PF_FORKNOEXEC)) - stats->ac_flag |= AFORK; + if (tsk->flags & PF_FORKNOEXEC) + stats->ac_flag |= AFORK; + } if (tsk->flags & PF_SUPERPRIV) stats->ac_flag |= ASU; if (tsk->flags & PF_DUMPCORE) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index cd9f947d83bc..777c5868091a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1363,6 +1363,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, */ WARN_ON_ONCE(!irqs_disabled()); + debug_work_activate(work); /* if draining, only works from the same workqueue are allowed */ if (unlikely(wq->flags & __WQ_DRAINING) && @@ -1443,7 +1444,6 @@ retry: worklist = &pwq->delayed_works; } - debug_work_activate(work); 
insert_work(pwq, work, worklist, work_flags); spin_unlock(&pwq->pool->lock); @@ -3331,21 +3331,15 @@ static void pwq_unbound_release_workfn(struct work_struct *work) unbound_release_work); struct workqueue_struct *wq = pwq->wq; struct worker_pool *pool = pwq->pool; - bool is_last = false; + bool is_last; - /* - * when @pwq is not linked, it doesn't hold any reference to the - * @wq, and @wq is invalid to access. - */ - if (!list_empty(&pwq->pwqs_node)) { - if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) - return; + if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) + return; - mutex_lock(&wq->mutex); - list_del_rcu(&pwq->pwqs_node); - is_last = list_empty(&wq->pwqs); - mutex_unlock(&wq->mutex); - } + mutex_lock(&wq->mutex); + list_del_rcu(&pwq->pwqs_node); + is_last = list_empty(&wq->pwqs); + mutex_unlock(&wq->mutex); mutex_lock(&wq_pool_mutex); put_unbound_pool(pool); @@ -3389,24 +3383,17 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq) * is updated and visible. */ if (!freezable || !workqueue_freezing) { - bool kick = false; - pwq->max_active = wq->saved_max_active; while (!list_empty(&pwq->delayed_works) && - pwq->nr_active < pwq->max_active) { + pwq->nr_active < pwq->max_active) pwq_activate_first_delayed(pwq); - kick = true; - } /* * Need to kick a worker after thawed or an unbound wq's - * max_active is bumped. In realtime scenarios, always kicking a - * worker will cause interference on the isolated cpu cores, so - * let's kick iff work items were activated. + * max_active is bumped. It's a slow path. Do it always. 
*/ - if (kick) - wake_up_worker(pwq->pool); + wake_up_worker(pwq->pool); } else { pwq->max_active = 0; } diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c index f1449244fdd4..036fc882cd72 100644 --- a/lib/decompress_unlz4.c +++ b/lib/decompress_unlz4.c @@ -115,9 +115,6 @@ STATIC inline int INIT unlz4(u8 *input, long in_len, error("data corrupted"); goto exit_2; } - } else if (size < 4) { - /* empty or end-of-file */ - goto exit_3; } chunksize = get_unaligned_le32(inp); @@ -131,10 +128,6 @@ STATIC inline int INIT unlz4(u8 *input, long in_len, continue; } - if (!fill && chunksize == 0) { - /* empty or end-of-file */ - goto exit_3; - } if (posp) *posp += 4; @@ -191,7 +184,6 @@ STATIC inline int INIT unlz4(u8 *input, long in_len, } } -exit_3: ret = 0; exit_2: if (!input) diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c index abea25310ac7..25d59a95bd66 100644 --- a/lib/decompress_unxz.c +++ b/lib/decompress_unxz.c @@ -167,7 +167,7 @@ * memeq and memzero are not used much and any remotely sane implementation * is fast enough. memcpy/memmove speed matters in multi-call mode, but * the kernel image is decompressed in single-call mode, in which only - * memmove speed can matter and only if there is a lot of uncompressible data + * memcpy speed can matter and only if there is a lot of uncompressible data * (LZMA2 stores uncompressible chunks in uncompressed form). Thus, the * functions below should just be kept small; it's probably not worth * optimizing for speed. diff --git a/lib/genalloc.c b/lib/genalloc.c index b8ac0450a2a6..e3a475b14e26 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -83,14 +83,14 @@ static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear) * users set the same bit, one user will return remain bits, otherwise * return 0. 
*/ -static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr) +static int bitmap_set_ll(unsigned long *map, int start, int nr) { unsigned long *p = map + BIT_WORD(start); - const unsigned long size = start + nr; + const int size = start + nr; int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); - while (nr >= bits_to_set) { + while (nr - bits_to_set >= 0) { if (set_bits_ll(p, mask_to_set)) return nr; nr -= bits_to_set; @@ -118,15 +118,14 @@ static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long * users clear the same bit, one user will return remain bits, * otherwise return 0. */ -static unsigned long -bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr) +static int bitmap_clear_ll(unsigned long *map, int start, int nr) { unsigned long *p = map + BIT_WORD(start); - const unsigned long size = start + nr; + const int size = start + nr; int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); - while (nr >= bits_to_clear) { + while (nr - bits_to_clear >= 0) { if (clear_bits_ll(p, mask_to_clear)) return nr; nr -= bits_to_clear; @@ -185,8 +184,8 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy size_t size, int nid) { struct gen_pool_chunk *chunk; - unsigned long nbits = size >> pool->min_alloc_order; - unsigned long nbytes = sizeof(struct gen_pool_chunk) + + int nbits = size >> pool->min_alloc_order; + int nbytes = sizeof(struct gen_pool_chunk) + BITS_TO_LONGS(nbits) * sizeof(long); chunk = vzalloc_node(nbytes, nid); @@ -243,7 +242,7 @@ void gen_pool_destroy(struct gen_pool *pool) struct list_head *_chunk, *_next_chunk; struct gen_pool_chunk *chunk; int order = pool->min_alloc_order; - unsigned long bit, end_bit; + int bit, end_bit; list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { chunk = list_entry(_chunk, struct 
gen_pool_chunk, next_chunk); @@ -275,7 +274,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) struct gen_pool_chunk *chunk; unsigned long addr = 0; int order = pool->min_alloc_order; - unsigned long nbits, start_bit, end_bit, remain; + int nbits, start_bit, end_bit, remain; #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG BUG_ON(in_nmi()); @@ -358,7 +357,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) { struct gen_pool_chunk *chunk; int order = pool->min_alloc_order; - unsigned long start_bit, nbits, remain; + int start_bit, nbits, remain; #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG BUG_ON(in_nmi()); @@ -554,7 +553,7 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, index = bitmap_find_next_zero_area(map, size, start, nr, 0); while (index < size) { - unsigned long next_bit = find_next_bit(map, size, index + nr); + int next_bit = find_next_bit(map, size, index + nr); if ((next_bit - index) < len) { len = next_bit - index; start_bit = index; diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 09a54c75cc70..daca582a8ed0 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -311,7 +311,7 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes) int err; struct iovec v; - if (iter_is_iovec(i)) { + if (!(i->type & (ITER_BVEC|ITER_KVEC))) { iterate_iovec(i, bytes, v, iov, skip, ({ err = fault_in_multipages_readable(v.iov_base, v.iov_len); diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 6104daf98ad9..f6c2c1e7779c 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -128,13 +128,12 @@ static int kobj_usermode_filter(struct kobject *kobj) static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem) { - int buffer_size = sizeof(env->buf) - env->buflen; int len; - len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size); - if (len >= buffer_size) { - pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n", - buffer_size, len); + len = 
strlcpy(&env->buf[env->buflen], subsystem, + sizeof(env->buf) - env->buflen); + if (len >= (sizeof(env->buf) - env->buflen)) { + WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n"); return -ENOMEM; } diff --git a/lib/seq_buf.c b/lib/seq_buf.c index a139298ad6ca..cbef5ee4c459 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c @@ -227,10 +227,8 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, WARN_ON(s->size == 0); - BUILD_BUG_ON(MAX_MEMHEX_BYTES * 2 >= HEX_CHARS); - while (len) { - start_len = min(len, MAX_MEMHEX_BYTES); + start_len = min(len, HEX_CHARS - 1); #ifdef __BIG_ENDIAN for (i = 0, j = 0; i < start_len; i++) { #else @@ -243,14 +241,12 @@ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, break; /* j increments twice per loop */ + len -= j / 2; hex[j++] = ' '; seq_buf_putmem(s, hex, j); if (seq_buf_has_overflowed(s)) return -1; - - len -= start_len; - data += start_len; } return 0; } diff --git a/lib/siphash.c b/lib/siphash.c index e632ee40aac1..3ae58b4edad6 100644 --- a/lib/siphash.c +++ b/lib/siphash.c @@ -49,7 +49,6 @@ SIPROUND; \ return (v0 ^ v1) ^ (v2 ^ v3); -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -81,8 +80,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) POSTAMBLE } EXPORT_SYMBOL(__siphash_aligned); -#endif +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -114,6 +113,7 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) POSTAMBLE } EXPORT_SYMBOL(__siphash_unaligned); +#endif /** * siphash_1u64 - compute 64-bit siphash PRF value of a u64 @@ -250,7 +250,6 @@ EXPORT_SYMBOL(siphash_3u32); HSIPROUND; \ return (v0 ^ v1) ^ (v2 ^ v3); -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 
__hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -281,8 +280,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_aligned); -#endif +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key) { @@ -314,6 +313,7 @@ u32 __hsiphash_unaligned(const void *data, size_t len, HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_unaligned); +#endif /** * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32 @@ -418,7 +418,6 @@ EXPORT_SYMBOL(hsiphash_4u32); HSIPROUND; \ return v1 ^ v3; -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u32)); @@ -439,8 +438,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_aligned); -#endif +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key) { @@ -462,6 +461,7 @@ u32 __hsiphash_unaligned(const void *data, size_t len, HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_unaligned); +#endif /** * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32 diff --git a/lib/string.c b/lib/string.c index 2c6826fbe77a..7f4baad6fb19 100644 --- a/lib/string.c +++ b/lib/string.c @@ -157,9 +157,11 @@ EXPORT_SYMBOL(strlcpy); * @src: Where to copy the string from * @count: Size of destination buffer * - * Copy the string, or as much of it as fits, into the dest buffer. The - * behavior is undefined if the string buffers overlap. The destination - * buffer is always NUL terminated, unless it's zero-sized. + * Copy the string, or as much of it as fits, into the dest buffer. 
+ * The routine returns the number of characters copied (not including + * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough. + * The behavior is undefined if the string buffers overlap. + * The destination buffer is always NUL terminated, unless it's zero-sized. * * Preferred to strlcpy() since the API doesn't require reading memory * from the src string beyond the specified "count" bytes, and since @@ -169,10 +171,8 @@ EXPORT_SYMBOL(strlcpy); * * Preferred to strncpy() since it always returns a valid string, and * doesn't unnecessarily force the tail of the destination buffer to be - * zeroed. If zeroing is desired please use strscpy_pad(). - * - * Return: The number of characters copied (not including the trailing - * %NUL) or -E2BIG if the destination buffer wasn't big enough. + * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy() + * with an overflow test, then just memset() the tail of the dest buffer. */ ssize_t strscpy(char *dest, const char *src, size_t count) { @@ -259,39 +259,6 @@ char *stpcpy(char *__restrict__ dest, const char *__restrict__ src) } EXPORT_SYMBOL(stpcpy); -/** - * strscpy_pad() - Copy a C-string into a sized buffer - * @dest: Where to copy the string to - * @src: Where to copy the string from - * @count: Size of destination buffer - * - * Copy the string, or as much of it as fits, into the dest buffer. The - * behavior is undefined if the string buffers overlap. The destination - * buffer is always %NUL terminated, unless it's zero-sized. - * - * If the source string is shorter than the destination buffer, zeros - * the tail of the destination buffer. - * - * For full explanation of why you may want to consider using the - * 'strscpy' functions please see the function docstring for strscpy(). - * - * Return: The number of characters copied (not including the trailing - * %NUL) or -E2BIG if the destination buffer wasn't big enough. 
- */ -ssize_t strscpy_pad(char *dest, const char *src, size_t count) -{ - ssize_t written; - - written = strscpy(dest, src, count); - if (written < 0 || written == count - 1) - return written; - - memset(dest + written + 1, 0, count - written - 1); - - return written; -} -EXPORT_SYMBOL(strscpy_pad); - #ifndef __HAVE_ARCH_STRCAT /** * strcat - Append one %NUL-terminated string to another @@ -728,72 +695,6 @@ void memzero_explicit(void *s, size_t count) } EXPORT_SYMBOL(memzero_explicit); -#ifndef __HAVE_ARCH_MEMSET16 -/** - * memset16() - Fill a memory area with a uint16_t - * @s: Pointer to the start of the area. - * @v: The value to fill the area with - * @count: The number of values to store - * - * Differs from memset() in that it fills with a uint16_t instead - * of a byte. Remember that @count is the number of uint16_ts to - * store, not the number of bytes. - */ -void *memset16(uint16_t *s, uint16_t v, size_t count) -{ - uint16_t *xs = s; - - while (count--) - *xs++ = v; - return s; -} -EXPORT_SYMBOL(memset16); -#endif - -#ifndef __HAVE_ARCH_MEMSET32 -/** - * memset32() - Fill a memory area with a uint32_t - * @s: Pointer to the start of the area. - * @v: The value to fill the area with - * @count: The number of values to store - * - * Differs from memset() in that it fills with a uint32_t instead - * of a byte. Remember that @count is the number of uint32_ts to - * store, not the number of bytes. - */ -void *memset32(uint32_t *s, uint32_t v, size_t count) -{ - uint32_t *xs = s; - - while (count--) - *xs++ = v; - return s; -} -EXPORT_SYMBOL(memset32); -#endif - -#ifndef __HAVE_ARCH_MEMSET64 -/** - * memset64() - Fill a memory area with a uint64_t - * @s: Pointer to the start of the area. - * @v: The value to fill the area with - * @count: The number of values to store - * - * Differs from memset() in that it fills with a uint64_t instead - * of a byte. Remember that @count is the number of uint64_ts to - * store, not the number of bytes. 
- */ -void *memset64(uint64_t *s, uint64_t v, size_t count) -{ - uint64_t *xs = s; - - while (count--) - *xs++ = v; - return s; -} -EXPORT_SYMBOL(memset64); -#endif - #ifndef __HAVE_ARCH_MEMCPY /** * memcpy - Copy one area of memory to another diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 1a0d1e771e6c..b1495f586f29 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -3983,8 +3983,8 @@ static struct bpf_test tests[] = { .u.insns_int = { BPF_LD_IMM64(R0, 0), BPF_LD_IMM64(R1, 0xffffffffffffffffLL), - BPF_STX_MEM(BPF_DW, R10, R1, -40), - BPF_LDX_MEM(BPF_DW, R0, R10, -40), + BPF_STX_MEM(BPF_W, R10, R1, -40), + BPF_LDX_MEM(BPF_W, R0, R10, -40), BPF_EXIT_INSN(), }, INTERNAL, @@ -4556,7 +4556,7 @@ static struct bpf_test tests[] = { { }, INTERNAL, { 0x34 }, - { { ETH_HLEN, 0xbef } }, + { { 1, 0xbef } }, .fill_helper = bpf_fill_ld_abs_vlan_push_pop, }, /* @@ -5399,14 +5399,7 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test) u64 duration; u32 ret; - /* - * NOTE: Several sub-tests may be present, in which case - * a zero {data_size, result} tuple indicates the end of - * the sub-test array. The first test is always run, - * even if both data_size and result happen to be zero. - */ - if (i > 0 && - test->test[i].data_size == 0 && + if (test->test[i].data_size == 0 && test->test[i].result == 0) break; diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c index 2c5197d6b944..08c3c8049998 100644 --- a/lib/xz/xz_dec_lzma2.c +++ b/lib/xz/xz_dec_lzma2.c @@ -387,14 +387,7 @@ static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b, *left -= copy_size; - /* - * If doing in-place decompression in single-call mode and the - * uncompressed size of the file is larger than the caller - * thought (i.e. it is invalid input!), the buffers below may - * overlap and cause undefined behavior with memcpy(). - * With valid inputs memcpy() would be fine here. 
- */ - memmove(dict->buf + dict->pos, b->in + b->in_pos, copy_size); + memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size); dict->pos += copy_size; if (dict->full < dict->pos) @@ -404,11 +397,7 @@ static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b, if (dict->pos == dict->end) dict->pos = 0; - /* - * Like above but for multi-call mode: use memmove() - * to avoid undefined behavior with invalid input. - */ - memmove(b->out + b->out_pos, b->in + b->in_pos, + memcpy(b->out + b->out_pos, b->in + b->in_pos, copy_size); } @@ -432,12 +421,6 @@ static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b) if (dict->pos == dict->end) dict->pos = 0; - /* - * These buffers cannot overlap even if doing in-place - * decompression because in multi-call mode dict->buf - * has been allocated by us in this file; it's not - * provided by the caller like in single-call mode. - */ memcpy(b->out + b->out_pos, dict->buf + dict->start, copy_size); } diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c index 9e5b9ab537fe..ac809b1e64f7 100644 --- a/lib/xz/xz_dec_stream.c +++ b/lib/xz/xz_dec_stream.c @@ -402,12 +402,12 @@ static enum xz_ret dec_stream_header(struct xz_dec *s) * we will accept other check types too, but then the check won't * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given. */ - if (s->temp.buf[HEADER_MAGIC_SIZE + 1] > XZ_CHECK_MAX) - return XZ_OPTIONS_ERROR; - s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1]; #ifdef XZ_DEC_ANY_CHECK + if (s->check_type > XZ_CHECK_MAX) + return XZ_OPTIONS_ERROR; + if (s->check_type > XZ_CHECK_CRC32) return XZ_UNSUPPORTED_CHECK; #else diff --git a/localversion-st b/localversion-st deleted file mode 100644 index 580cdefadcc3..000000000000 --- a/localversion-st +++ /dev/null @@ -1 +0,0 @@ --st5 \ No newline at end of file diff --git a/mm/Kconfig b/mm/Kconfig index 0d74e47c4d0e..bdb3111e6971 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -566,7 +566,7 @@ config ZPOOL zsmalloc. 
config ZBUD - tristate "Low density storage for compressed pages" + tristate "Low (Up to 2x) density storage for compressed pages" default n help A special purpose allocator for storing compressed pages. @@ -575,6 +575,16 @@ config ZBUD deterministic reclaim properties that make it preferable to a higher density approach when reclaim will be used. +config Z3FOLD + tristate "Up to 3x density storage for compressed pages" + depends on ZPOOL + default n + help + A special purpose allocator for storing compressed pages. + It is designed to store up to three compressed pages per physical + page. It is a ZBUD derivative so the simplicity and determinism are + still there. + config ZSMALLOC tristate "Memory allocator for compressed pages" depends on MMU diff --git a/mm/Makefile b/mm/Makefile index 04d48b46dbe9..0919d3bea38b 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -93,6 +93,7 @@ obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o obj-$(CONFIG_ZPOOL) += zpool.o obj-$(CONFIG_ZBUD) += zbud.o obj-$(CONFIG_ZSMALLOC) += zsmalloc.o +obj-$(CONFIG_Z3FOLD) += z3fold.o obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o obj-$(CONFIG_CMA) += cma.o obj-$(CONFIG_MEMORY_BALLOON) += balloon_compaction.o diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 1974b9225982..b12a49bf78de 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -21,7 +21,6 @@ struct backing_dev_info noop_backing_dev_info = { EXPORT_SYMBOL_GPL(noop_backing_dev_info); static struct class *bdi_class; -const char *bdi_unknown_name = "(unknown)"; /* * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side @@ -883,13 +882,6 @@ void bdi_unregister(struct backing_dev_info *bdi) wb_shutdown(&bdi->wb); cgwb_bdi_destroy(bdi); - /* - * If this BDI's min ratio has been set, use bdi_set_min_ratio() to - * update the global bdi_min_ratio. 
- */ - if (bdi->min_ratio) - bdi_set_min_ratio(bdi, 0); - if (bdi->dev) { bdi_debug_unregister(bdi); device_unregister(bdi->dev); diff --git a/mm/gup.c b/mm/gup.c index c80cdc408228..4c5857889e9d 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -59,22 +59,13 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, } /* - * FOLL_FORCE or a forced COW break can write even to unwritable pte's, - * but only after we've gone through a COW cycle and they are dirty. + * FOLL_FORCE can write to even unwritable pte's, but only + * after we've gone through a COW cycle and they are dirty. */ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) { - return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte)); -} - -/* - * A (separate) COW fault might break the page the other way and - * get_user_pages() would return the page from what is now the wrong - * VM. So we need to force a COW break at GUP time even for reads. - */ -static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags) -{ - return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET); + return pte_write(pte) || + ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); } static struct page *follow_page_pte(struct vm_area_struct *vma, @@ -518,18 +509,12 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, if (!vma || check_vma_flags(vma, gup_flags)) return i ? 
: -EFAULT; if (is_vm_hugetlb_page(vma)) { - if (should_force_cow_break(vma, foll_flags)) - foll_flags |= FOLL_WRITE; i = follow_hugetlb_page(mm, vma, pages, vmas, &start, &nr_pages, i, - foll_flags); + gup_flags); continue; } } - - if (should_force_cow_break(vma, foll_flags)) - foll_flags |= FOLL_WRITE; - retry: /* * If we have a pending SIGKILL, don't keep faulting pages and @@ -1361,10 +1346,6 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, /* * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to * the regular GUP. It will only return non-negative values. - * - * Careful, careful! COW breaking can go either way, so a non-write - * access can get ambiguous page results. If you call this function without - * 'write' set, you'd better be sure that you're ok with that ambiguity. */ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) @@ -1394,12 +1375,6 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, * * We do not adopt an rcu_read_lock(.) here as we also want to * block IPIs that come from THPs splitting. - * - * NOTE! We allow read-only gup_fast() here, but you'd better be - * careful about possible COW pages. You'll get _a_ COW page, but - * not necessarily the one you intended to get depending on what - * COW event happens after this. COW may break the page copy in a - * random direction. */ local_irq_save(flags); @@ -1410,22 +1385,15 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, next = pgd_addr_end(addr, end); if (pgd_none(pgd)) break; - /* - * The FAST_GUP case requires FOLL_WRITE even for pure reads, - * because get_user_pages() may need to cause an early COW in - * order to avoid confusing the normal COW routines. So only - * targets that are already writable are safe to do by just - * looking at the page tables. 
- */ if (unlikely(pgd_huge(pgd))) { - if (!gup_huge_pgd(pgd, pgdp, addr, next, 1, + if (!gup_huge_pgd(pgd, pgdp, addr, next, write, pages, &nr)) break; } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, - PGDIR_SHIFT, next, 1, pages, &nr)) + PGDIR_SHIFT, next, write, pages, &nr)) break; - } else if (!gup_pud_range(pgd, addr, next, 1, pages, &nr)) + } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) break; } while (pgdp++, addr = next, addr != end); local_irq_restore(flags); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index f7c63bd6506e..a0616b419d29 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -823,6 +823,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, spinlock_t *ptl; pgtable_t pgtable; struct page *zero_page; + bool set; int ret; pgtable = pte_alloc_one(mm, haddr); if (unlikely(!pgtable)) @@ -835,11 +836,10 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, } ptl = pmd_lock(mm, pmd); ret = 0; + set = false; if (pmd_none(*pmd)) { if (userfaultfd_missing(vma)) { spin_unlock(ptl); - pte_free(mm, pgtable); - put_huge_zero_page(); ret = handle_userfault(vma, address, flags, VM_UFFD_MISSING); VM_BUG_ON(ret & VM_FAULT_FALLBACK); @@ -848,9 +848,11 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, haddr, pmd, zero_page); spin_unlock(ptl); + set = true; } - } else { + } else spin_unlock(ptl); + if (!set) { pte_free(mm, pgtable); put_huge_zero_page(); } @@ -1267,12 +1269,13 @@ out_unlock: } /* - * FOLL_FORCE or a forced COW break can write even to unwritable pmd's, - * but only after we've gone through a COW cycle and they are dirty. + * FOLL_FORCE can write to even unwritable pmd's, but only + * after we've gone through a COW cycle and they are dirty. 
*/ static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) { - return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd)); + return pmd_write(pmd) || + ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); } struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, @@ -1339,6 +1342,9 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, bool was_writable; int flags = 0; + /* A PROT_NONE fault should not end up here */ + BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); + ptl = pmd_lock(mm, pmdp); if (unlikely(!pmd_same(pmd, *pmdp))) goto out_unlock; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 3184845236ee..44970b17f4fe 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -66,21 +66,6 @@ DEFINE_SPINLOCK(hugetlb_lock); static int num_fault_mutexes; struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp; -static inline bool PageHugeFreed(struct page *head) -{ - return page_private(head + 4) == -1UL; -} - -static inline void SetPageHugeFreed(struct page *head) -{ - set_page_private(head + 4, -1UL); -} - -static inline void ClearPageHugeFreed(struct page *head) -{ - set_page_private(head + 4, 0); -} - /* Forward declaration */ static int hugetlb_acct_memory(struct hstate *h, long delta); @@ -856,7 +841,6 @@ static void enqueue_huge_page(struct hstate *h, struct page *page) list_move(&page->lru, &h->hugepage_freelists[nid]); h->free_huge_pages++; h->free_huge_pages_node[nid]++; - SetPageHugeFreed(page); } static struct page *dequeue_huge_page_node(struct hstate *h, int nid) @@ -874,7 +858,6 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid) return NULL; list_move(&page->lru, &h->hugepage_activelist); set_page_refcounted(page); - ClearPageHugeFreed(page); h->free_huge_pages--; h->free_huge_pages_node[nid]--; return page; @@ -1159,16 +1142,14 @@ static inline int alloc_fresh_gigantic_page(struct hstate *h, static void update_and_free_page(struct hstate *h, struct page 
*page) { int i; - struct page *subpage = page; if (hstate_is_gigantic(h) && !gigantic_page_supported()) return; h->nr_huge_pages--; h->nr_huge_pages_node[page_to_nid(page)]--; - for (i = 0; i < pages_per_huge_page(h); - i++, subpage = mem_map_next(subpage, page, i)) { - subpage->flags &= ~(1 << PG_locked | 1 << PG_error | + for (i = 0; i < pages_per_huge_page(h); i++) { + page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced | 1 << PG_dirty | 1 << PG_active | 1 << PG_private | 1 << PG_writeback); @@ -1203,11 +1184,12 @@ struct hstate *size_to_hstate(unsigned long size) */ bool page_huge_active(struct page *page) { - return PageHeadHuge(page) && PagePrivate(&page[1]); + VM_BUG_ON_PAGE(!PageHuge(page), page); + return PageHead(page) && PagePrivate(&page[1]); } /* never called for tail page */ -void set_page_huge_active(struct page *page) +static void set_page_huge_active(struct page *page) { VM_BUG_ON_PAGE(!PageHeadHuge(page), page); SetPagePrivate(&page[1]); @@ -1285,7 +1267,6 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) set_hugetlb_cgroup(page, NULL); h->nr_huge_pages++; h->nr_huge_pages_node[nid]++; - ClearPageHugeFreed(page); spin_unlock(&hugetlb_lock); put_page(page); /* free it into the hugepage allocator */ } @@ -1444,32 +1425,11 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, */ static void dissolve_free_huge_page(struct page *page) { -retry: spin_lock(&hugetlb_lock); if (PageHuge(page) && !page_count(page)) { struct page *head = compound_head(page); struct hstate *h = page_hstate(head); int nid = page_to_nid(head); - - /* - * We should make sure that the page is already on the free list - * when it is dissolved. - */ - if (unlikely(!PageHugeFreed(head))) { - spin_unlock(&hugetlb_lock); - cond_resched(); - - /* - * Theoretically, we should return -EBUSY when we - * encounter this race. In fact, we have a chance - * to successfully dissolve the page if we do a - * retry. 
Because the race window is quite small. - * If we seize this opportunity, it is an optimization - * for increasing the success rate of dissolving page. - */ - goto retry; - } - list_del(&head->lru); h->free_huge_pages--; h->free_huge_pages_node[nid]--; @@ -2526,10 +2486,8 @@ static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, return -ENOMEM; retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); - if (retval) { + if (retval) kobject_put(hstate_kobjs[hi]); - hstate_kobjs[hi] = NULL; - } return retval; } @@ -3273,19 +3231,14 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, struct page *page; struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); - unsigned long mmun_start = start; /* For mmu_notifiers */ - unsigned long mmun_end = end; /* For mmu_notifiers */ + const unsigned long mmun_start = start; /* For mmu_notifiers */ + const unsigned long mmun_end = end; /* For mmu_notifiers */ WARN_ON(!is_vm_hugetlb_page(vma)); BUG_ON(start & ~huge_page_mask(h)); BUG_ON(end & ~huge_page_mask(h)); tlb_start_vma(tlb, vma); - - /* - * If sharing possible, alert mmu notifiers of worst case. - */ - adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end); mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); address = start; again: @@ -3295,11 +3248,8 @@ again: continue; ptl = huge_pte_lock(h, mm, ptep); - if (huge_pmd_unshare(mm, &address, ptep)) { - tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); - force_flush = 1; + if (huge_pmd_unshare(mm, &address, ptep)) goto unlock; - } pte = huge_ptep_get(ptep); if (huge_pte_none(pte)) @@ -3392,23 +3342,12 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, { struct mm_struct *mm; struct mmu_gather tlb; - unsigned long tlb_start = start; - unsigned long tlb_end = end; - - /* - * If shared PMDs were possibly used within this vma range, adjust - * start/end for worst case tlb flushing. 
- * Note that we can not be sure if PMDs are shared until we try to - * unmap pages. However, we want to make sure TLB flushing covers - * the largest possible range. - */ - adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end); mm = vma->vm_mm; - tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end); + tlb_gather_mmu(&tlb, mm, start, end); __unmap_hugepage_range(&tlb, vma, start, end, ref_page); - tlb_finish_mmu(&tlb, tlb_start, tlb_end); + tlb_finish_mmu(&tlb, start, end); } /* @@ -3720,7 +3659,7 @@ retry: * So we need to block hugepage fault by PG_hwpoison bit check. */ if (unlikely(PageHWPoison(page))) { - ret = VM_FAULT_HWPOISON_LARGE | + ret = VM_FAULT_HWPOISON | VM_FAULT_SET_HINDEX(hstate_index(h)); goto backout_unlocked; } @@ -3790,7 +3729,7 @@ backout_unlocked: #ifdef CONFIG_SMP u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, - pgoff_t idx) + pgoff_t idx, unsigned long address) { unsigned long key[2]; u32 hash; @@ -3798,7 +3737,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, key[0] = (unsigned long) mapping; key[1] = idx; - hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); + hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0); return hash & (num_fault_mutexes - 1); } @@ -3808,7 +3747,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, * return 0 and avoid the hashing overhead. */ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, - pgoff_t idx) + pgoff_t idx, unsigned long address) { return 0; } @@ -3853,7 +3792,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, * get spurious allocation failures if two CPUs race to instantiate * the same page in the page cache. 
*/ - hash = hugetlb_fault_mutex_hash(h, mapping, idx); + hash = hugetlb_fault_mutex_hash(h, mapping, idx, address); mutex_lock(&hugetlb_fault_mutex_table[hash]); entry = huge_ptep_get(ptep); @@ -4084,21 +4023,11 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, pte_t pte; struct hstate *h = hstate_vma(vma); unsigned long pages = 0; - unsigned long f_start = start; - unsigned long f_end = end; - bool shared_pmd = false; - - /* - * In the case of shared PMDs, the area to flush could be beyond - * start/end. Set f_start/f_end to cover the maximum possible - * range if PMD sharing is possible. - */ - adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end); BUG_ON(address >= end); - flush_cache_range(vma, f_start, f_end); + flush_cache_range(vma, address, end); - mmu_notifier_invalidate_range_start(mm, f_start, f_end); + mmu_notifier_invalidate_range_start(mm, start, end); i_mmap_lock_write(vma->vm_file->f_mapping); for (; address < end; address += huge_page_size(h)) { spinlock_t *ptl; @@ -4109,7 +4038,6 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, if (huge_pmd_unshare(mm, &address, ptep)) { pages++; spin_unlock(ptl); - shared_pmd = true; continue; } pte = huge_ptep_get(ptep); @@ -4144,18 +4072,12 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare * may have cleared our pud entry and done put_page on the page table: * once we release i_mmap_rwsem, another task can do the final put_page - * and that page table be reused and filled with junk. If we actually - * did unshare a page of pmds, flush the range corresponding to the pud. + * and that page table be reused and filled with junk. 
*/ - if (shared_pmd) { - flush_tlb_range(vma, f_start, f_end); - mmu_notifier_invalidate_range(mm, f_start, f_end); - } else { - flush_tlb_range(vma, start, end); - mmu_notifier_invalidate_range(mm, start, end); - } + flush_tlb_range(vma, start, end); + mmu_notifier_invalidate_range(mm, start, end); i_mmap_unlock_write(vma->vm_file->f_mapping); - mmu_notifier_invalidate_range_end(mm, f_start, f_end); + mmu_notifier_invalidate_range_end(mm, start, end); return pages << h->order; } @@ -4358,23 +4280,21 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) { - unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE), - v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); + unsigned long a_start, a_end; - /* - * vma need span at least one aligned PUD size and the start,end range - * must at least partialy within it. - */ - if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || - (*end <= v_start) || (*start >= v_end)) + if (!(vma->vm_flags & VM_MAYSHARE)) return; /* Extend the range to be PUD aligned for a worst case scenario */ - if (*start > v_start) - *start = ALIGN_DOWN(*start, PUD_SIZE); + a_start = ALIGN_DOWN(*start, PUD_SIZE); + a_end = ALIGN(*end, PUD_SIZE); - if (*end < v_end) - *end = ALIGN(*end, PUD_SIZE); + /* + * Intersect the range with the vma range, since pmd sharing won't be + * across vma after all + */ + *start = max(vma->vm_start, a_start); + *end = min(vma->vm_end, a_end); } /* @@ -4624,9 +4544,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list) { bool ret = true; + VM_BUG_ON_PAGE(!PageHead(page), page); spin_lock(&hugetlb_lock); - if (!PageHeadHuge(page) || !page_huge_active(page) || - !get_page_unless_zero(page)) { + if (!page_huge_active(page) || !get_page_unless_zero(page)) { ret = false; goto unlock; } diff --git a/mm/kmemleak.c b/mm/kmemleak.c index d68679106731..ff0390823e04 100644 --- 
a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1403,7 +1403,7 @@ static void kmemleak_scan(void) if (page_count(page) == 0) continue; scan_block(page, page + 1, NULL); - if (!(pfn & 63)) + if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page)))) cond_resched(); } } diff --git a/mm/ksm.c b/mm/ksm.c index 2a4ef426b331..3cd071cdfbb3 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -651,7 +651,6 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) ksm_pages_shared--; put_anon_vma(rmap_item->anon_vma); - rmap_item->head = NULL; rmap_item->address &= PAGE_MASK; } else if (rmap_item->address & UNSTABLE_FLAG) { diff --git a/mm/memblock.c b/mm/memblock.c index 6d3cd9e38c41..e39ef2fe5c17 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -193,6 +193,14 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, * * Find @size free area aligned to @align in the specified range and node. * + * When allocation direction is bottom-up, the @start should be greater + * than the end of the kernel image. Otherwise, it will be trimmed. The + * reason is that we want the bottom-up allocation just near the kernel + * image so it is highly likely that the allocated memory and the kernel + * will reside in the same node. + * + * If bottom-up allocation failed, will try to allocate memory top-down. + * * RETURNS: * Found address on success, 0 on failure. 
*/ @@ -200,6 +208,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid, ulong flags) { + phys_addr_t kernel_end, ret; + /* pump up @end */ if (end == MEMBLOCK_ALLOC_ACCESSIBLE) end = memblock.current_limit; @@ -207,12 +217,39 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, /* avoid allocating the first page */ start = max_t(phys_addr_t, start, PAGE_SIZE); end = max(start, end); - if (memblock_bottom_up()) - return __memblock_find_range_bottom_up(start, end, size, align, - nid, flags); - else - return __memblock_find_range_top_down(start, end, size, align, - nid, flags); + kernel_end = __pa_symbol(_end); + + /* + * try bottom-up allocation only when bottom-up mode + * is set and @end is above the kernel image. + */ + if (memblock_bottom_up() && end > kernel_end) { + phys_addr_t bottom_up_start; + + /* make sure we will allocate above the kernel */ + bottom_up_start = max(start, kernel_end); + + /* ok, try bottom-up allocation first */ + ret = __memblock_find_range_bottom_up(bottom_up_start, end, + size, align, nid, flags); + if (ret) + return ret; + + /* + * we always limit bottom-up allocation above the kernel, + * but top-down allocation doesn't have the limit, so + * retrying top-down allocation may succeed when bottom-up + * allocation failed. + * + * bottom-up allocation is expected to be fail very rarely, + * so we use WARN_ONCE() here to see the stack trace if + * fail happens. 
+ */ + WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n"); + } + + return __memblock_find_range_top_down(start, end, size, align, nid, + flags); } /** diff --git a/mm/memory.c b/mm/memory.c index 09a57fe6ae01..5dfc9fac8b74 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -129,7 +129,7 @@ static int __init init_zero_pfn(void) zero_pfn = page_to_pfn(ZERO_PAGE(0)); return 0; } -early_initcall(init_zero_pfn); +core_initcall(init_zero_pfn); #if defined(SPLIT_RSS_COUNTING) @@ -1686,11 +1686,11 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { - pte_t *pte, *mapped_pte; + pte_t *pte; spinlock_t *ptl; int err = 0; - mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); + pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); if (!pte) return -ENOMEM; arch_enter_lazy_mmu_mode(); @@ -1704,7 +1704,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); - pte_unmap_unlock(mapped_pte, ptl); + pte_unmap_unlock(pte - 1, ptl); return err; } @@ -3210,6 +3210,9 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, bool was_writable = pte_write(pte); int flags = 0; + /* A PROT_NONE fault should not end up here */ + BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); + /* * The "pte" at this point cannot be used safely without * validation through pte_unmap_same(). 
It's of NUMA type but @@ -3302,11 +3305,6 @@ static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, return VM_FAULT_FALLBACK; } -static inline bool vma_is_accessible(struct vm_area_struct *vma) -{ - return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE); -} - /* * These routines also need to handle stuff like marking pages dirty * and/or accessed for architectures that don't do it in hardware (most @@ -3353,7 +3351,7 @@ static int handle_pte_fault(struct mm_struct *mm, pte, pmd, flags, entry); } - if (pte_protnone(entry) && vma_is_accessible(vma)) + if (pte_protnone(entry)) return do_numa_page(mm, vma, address, entry, pte, pmd); ptl = pte_lockptr(mm, pmd); @@ -3428,7 +3426,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (pmd_trans_splitting(orig_pmd)) return 0; - if (pmd_protnone(orig_pmd) && vma_is_accessible(vma)) + if (pmd_protnone(orig_pmd)) return do_huge_pmd_numa_page(mm, vma, address, orig_pmd, pmd); diff --git a/mm/mmap.c b/mm/mmap.c index 4993ec670d9e..6e5a79d8ba43 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1349,6 +1349,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr, *populate = 0; + while (file && (file->f_mode & FMODE_NONMAPPABLE)) + file = file->f_op->get_lower_file(file); + if (!len) return -EINVAL; diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 6d614ea554af..edaf64a378cb 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -591,6 +591,11 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p, * space under its control. 
*/ do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true); + trace_oom_sigkill(victim->pid, victim->comm, + victim_points, + get_mm_rss(victim->mm), + oc->gfp_mask); + mark_oom_victim(victim); pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n", task_pid_nr(victim), victim->comm, K(victim->mm->total_vm), @@ -760,9 +765,6 @@ void pagefault_out_of_memory(void) if (mem_cgroup_oom_synchronize(true)) return; - if (fatal_signal_pending(current)) - return; - if (!mutex_trylock(&oom_lock)) return; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cf12b4325559..710ca2b7a8d0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -13,6 +13,11 @@ * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2013 Sony Mobile Communications Inc, + * and licensed under the license of the file. + */ #include #include @@ -724,7 +729,7 @@ static inline void __free_one_page(struct page *page, struct page *buddy; unsigned int max_order; - max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order); + max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1); VM_BUG_ON(!zone_is_initialized(zone)); VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); @@ -739,7 +744,7 @@ static inline void __free_one_page(struct page *page, VM_BUG_ON_PAGE(bad_range(zone, page), page); continue_merging: - while (order < max_order) { + while (order < max_order - 1) { buddy_idx = __find_buddy_index(page_idx, order); buddy = page + (buddy_idx - page_idx); if (!page_is_buddy(page, buddy, order)) @@ -760,7 +765,7 @@ continue_merging: page_idx = combined_idx; order++; } - if (order < MAX_ORDER - 1) { + if (max_order < MAX_ORDER) { /* If we are here, it means order is >= pageblock_order. * We want to prevent merge between freepages on isolate * pageblock and normal pageblock. 
Without this, pageblock @@ -781,7 +786,7 @@ continue_merging: is_migrate_isolate(buddy_mt))) goto done_merging; } - max_order = order + 1; + max_order++; goto continue_merging; } @@ -2838,6 +2843,7 @@ void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...) pr_warn("%s: page allocation failure: order:%u, mode:0x%x\n", current->comm, order, gfp_mask); + trace_mm_page_alloc_fail(order, gfp_mask); dump_stack(); if (!should_suppress_show_mem()) show_mem(filter); @@ -3401,6 +3407,9 @@ retry_cpuset: kmemcheck_pagealloc_alloc(page, order, gfp_mask); trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); + if (order > 1) + trace_mm_page_alloc_highorder(page, order, + alloc_mask, ac.migratetype); out: /* diff --git a/mm/page_io.c b/mm/page_io.c index ab92cd559404..b995a5ba5e8f 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -32,6 +32,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags, bio = bio_alloc(gfp_flags, 1); if (bio) { bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); + bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; bio->bi_end_io = end_io; bio_add_page(bio, page, PAGE_SIZE, 0); @@ -243,6 +244,11 @@ out: return ret; } +static sector_t swap_page_sector(struct page *page) +{ + return (sector_t)__page_file_index(page) << (PAGE_CACHE_SHIFT - 9); +} + int __swap_writepage(struct page *page, struct writeback_control *wbc, bio_end_io_t end_write_func) { @@ -291,8 +297,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, return ret; } - ret = bdev_write_page(sis->bdev, map_swap_page(page, &sis->bdev), - page, wbc); + ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc); if (!ret) { count_vm_event(PSWPOUT); return 0; @@ -340,7 +345,7 @@ int swap_readpage(struct page *page) return ret; } - ret = bdev_read_page(sis->bdev, map_swap_page(page, &sis->bdev), page); + ret = bdev_read_page(sis->bdev, swap_page_sector(page), page); if (!ret) { count_vm_event(PSWPIN); return 0; diff --git a/mm/slab.h 
b/mm/slab.h index 5d5402740270..66118e967e04 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -133,7 +133,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size, #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \ SLAB_TEMPORARY | SLAB_NOTRACK) #else -#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE) +#define SLAB_CACHE_FLAGS (0) #endif #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS) diff --git a/mm/slub.c b/mm/slub.c index 58053e0ef45a..eb5853fcf622 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5560,8 +5560,10 @@ static int sysfs_slab_add(struct kmem_cache *s) s->kobj.kset = cache_kset(s); err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); - if (err) + if (err) { + kobject_put(&s->kobj); goto out; + } err = sysfs_create_group(&s->kobj, &slab_attr_group); if (err) diff --git a/mm/swapfile.c b/mm/swapfile.c index 204224d8e0d1..feb9abb8e8cc 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1710,7 +1710,7 @@ sector_t map_swap_page(struct page *page, struct block_device **bdev) { swp_entry_t entry; entry.val = page_private(page); - return map_swap_entry(entry, bdev) << (PAGE_SHIFT - 9); + return map_swap_entry(entry, bdev); } /* diff --git a/mm/vmstat.c b/mm/vmstat.c index c15e39e57006..3a07628297da 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -967,9 +967,6 @@ static void pagetypeinfo_showfree_print(struct seq_file *m, list_for_each(curr, &area->free_list[mtype]) freecount++; seq_printf(m, "%6lu ", freecount); - spin_unlock_irq(&zone->lock); - cond_resched(); - spin_lock_irq(&zone->lock); } seq_putc(m, '\n'); } @@ -1352,7 +1349,7 @@ static int vmstat_show(struct seq_file *m, void *arg) unsigned long off = l - (unsigned long *)m->private; seq_puts(m, vmstat_text[off]); - seq_put_decimal_ull(m, " ", *l); + seq_put_decimal_ull(m, ' ', *l); seq_putc(m, '\n'); return 0; } diff --git a/mm/z3fold.c b/mm/z3fold.c new file mode 100644 index 000000000000..189267ca1b33 --- /dev/null +++ b/mm/z3fold.c @@ 
-0,0 +1,1108 @@ +/* + * z3fold.c + * + * This implementation is based on zbud written by Seth Jennings. + * + * z3fold is an special purpose allocator for storing compressed pages. It + * can store up to three compressed pages per page which improves the + * compression ratio of zbud while retaining its main concepts (e. g. always + * storing an integral number of objects per page) and simplicity. + * It still has simple and deterministic reclaim properties that make it + * preferable to a higher density approach (with no requirement on integral + * number of object per page) when reclaim is used. + * + * As in zbud, pages are divided into "chunks". The size of the chunks is + * fixed at compile time and is determined by NCHUNKS_ORDER below. + * + * z3fold doesn't export any API and is meant to be used via zpool API. + */ +/* + * Copyright (C) 2016 Sony Mobile Communications Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/***************** + * Structures +*****************/ +struct z3fold_pool; +struct z3fold_ops { + int (*evict)(struct z3fold_pool *pool, unsigned long handle); +}; + +enum buddy { + HEADLESS = 0, + FIRST, + MIDDLE, + LAST, + BUDDIES_MAX +}; + +/* + * struct z3fold_header - z3fold page metadata occupying first chunks of each + * z3fold page, except for HEADLESS pages + * @buddy: links the z3fold page into the relevant list in the + * pool + * @page_lock: per-page lock + * @refcount: reference count for the z3fold page + * @work: work_struct for page layout optimization + * @pool: pointer to the pool which this page belongs to + * @cpu: CPU which this page "belongs" to + * @first_chunks: the size of the first buddy in chunks, 0 if free + * @middle_chunks: the size of the middle buddy in chunks, 0 if free + * @last_chunks: the size of the last buddy in chunks, 0 if free + * @first_num: the starting number (for the first handle) + */ +struct z3fold_header { + struct list_head buddy; + spinlock_t page_lock; + struct kref refcount; + struct work_struct work; + struct z3fold_pool *pool; + short cpu; + unsigned short first_chunks; + unsigned short middle_chunks; + unsigned short last_chunks; + unsigned short start_middle; + unsigned short first_num:2; +}; + +/* + * NCHUNKS_ORDER determines the internal allocation granularity, effectively + * adjusting internal fragmentation. It also determines the number of + * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the + * allocation granularity will be in chunks of size PAGE_SIZE/64. 
Some chunks + * in the beginning of an allocated page are occupied by z3fold header, so + * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), + * which shows the max number of free chunks in z3fold page, also there will + * be 63, or 62, respectively, freelists per pool. + */ +#define NCHUNKS_ORDER 6 + +#define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER) +#define CHUNK_SIZE (1 << CHUNK_SHIFT) +#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE) +#define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT) +#define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT) +#define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT) + +#define BUDDY_MASK (0x3) + +/** + * struct z3fold_pool - stores metadata for each z3fold pool + * @name: pool name + * @lock: protects pool unbuddied/lru lists + * @stale_lock: protects pool stale page list + * @unbuddied: per-cpu array of lists tracking z3fold pages that contain 2- + * buddies; the list each z3fold page is added to depends on + * the size of its free region. + * @lru: list tracking the z3fold pages in LRU order by most recently + * added buddy. + * @stale: list of pages marked for freeing + * @pages_nr: number of z3fold pages in the pool. + * @ops: pointer to a structure of user defined operations specified at + * pool creation time. + * @compact_wq: workqueue for page layout background optimization + * @release_wq: workqueue for safe page release + * @work: work_struct for safe page release + * + * This structure is allocated at pool creation time and maintains metadata + * pertaining to a particular z3fold pool. 
+ */ +struct z3fold_pool { + const char *name; + spinlock_t lock; + spinlock_t stale_lock; + struct list_head *unbuddied; + struct list_head lru; + struct list_head stale; + atomic64_t pages_nr; + const struct z3fold_ops *ops; + struct zpool *zpool; + const struct zpool_ops *zpool_ops; + struct workqueue_struct *compact_wq; + struct workqueue_struct *release_wq; + struct work_struct work; +}; + +/* + * Internal z3fold page flags + */ +enum z3fold_page_flags { + PAGE_HEADLESS = 0, + MIDDLE_CHUNK_MAPPED, + NEEDS_COMPACTING, + PAGE_STALE +}; + +/***************** + * Helpers +*****************/ + +/* Converts an allocation size in bytes to size in z3fold chunks */ +static int size_to_chunks(size_t size) +{ + return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT; +} + +#define for_each_unbuddied_list(_iter, _begin) \ + for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++) + +static void compact_page_work(struct work_struct *w); + +/* Initializes the z3fold header of a newly allocated z3fold page */ +static struct z3fold_header *init_z3fold_page(struct page *page, + struct z3fold_pool *pool) +{ + struct z3fold_header *zhdr = page_address(page); + + INIT_LIST_HEAD(&page->lru); + clear_bit(PAGE_HEADLESS, &page->private); + clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); + clear_bit(NEEDS_COMPACTING, &page->private); + clear_bit(PAGE_STALE, &page->private); + + spin_lock_init(&zhdr->page_lock); + kref_init(&zhdr->refcount); + zhdr->first_chunks = 0; + zhdr->middle_chunks = 0; + zhdr->last_chunks = 0; + zhdr->first_num = 0; + zhdr->start_middle = 0; + zhdr->cpu = -1; + zhdr->pool = pool; + INIT_LIST_HEAD(&zhdr->buddy); + INIT_WORK(&zhdr->work, compact_page_work); + return zhdr; +} + +/* Resets the struct page fields and frees the page */ +static void free_z3fold_page(struct page *page) +{ + __free_page(page); +} + +/* Lock a z3fold page */ +static inline void z3fold_page_lock(struct z3fold_header *zhdr) +{ + spin_lock(&zhdr->page_lock); +} + +/* Try to lock a z3fold page */ 
+static inline int z3fold_page_trylock(struct z3fold_header *zhdr) +{ + return spin_trylock(&zhdr->page_lock); +} + +/* Unlock a z3fold page */ +static inline void z3fold_page_unlock(struct z3fold_header *zhdr) +{ + spin_unlock(&zhdr->page_lock); +} + +/* + * Encodes the handle of a particular buddy within a z3fold page + * Pool lock should be held as this function accesses first_num + */ +static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud) +{ + unsigned long handle; + + handle = (unsigned long)zhdr; + if (bud != HEADLESS) + handle += (bud + zhdr->first_num) & BUDDY_MASK; + return handle; +} + +/* Returns the z3fold page where a given handle is stored */ +static struct z3fold_header *handle_to_z3fold_header(unsigned long handle) +{ + return (struct z3fold_header *)(handle & PAGE_MASK); +} + +/* Returns buddy number */ +static enum buddy handle_to_buddy(unsigned long handle) +{ + struct z3fold_header *zhdr = handle_to_z3fold_header(handle); + return (handle - zhdr->first_num) & BUDDY_MASK; +} + +static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) +{ + struct page *page = virt_to_page(zhdr); + struct z3fold_pool *pool = zhdr->pool; + + WARN_ON(!list_empty(&zhdr->buddy)); + set_bit(PAGE_STALE, &page->private); + clear_bit(NEEDS_COMPACTING, &page->private); + spin_lock(&pool->lock); + if (!list_empty(&page->lru)) + list_del(&page->lru); + spin_unlock(&pool->lock); + if (locked) + z3fold_page_unlock(zhdr); + spin_lock(&pool->stale_lock); + list_add(&zhdr->buddy, &pool->stale); + queue_work(pool->release_wq, &pool->work); + spin_unlock(&pool->stale_lock); +} + +static void __attribute__((__unused__)) + release_z3fold_page(struct kref *ref) +{ + struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, + refcount); + __release_z3fold_page(zhdr, false); +} + +static void release_z3fold_page_locked(struct kref *ref) +{ + struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, + refcount); + 
WARN_ON(z3fold_page_trylock(zhdr)); + __release_z3fold_page(zhdr, true); +} + +static void release_z3fold_page_locked_list(struct kref *ref) +{ + struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, + refcount); + spin_lock(&zhdr->pool->lock); + list_del_init(&zhdr->buddy); + spin_unlock(&zhdr->pool->lock); + + WARN_ON(z3fold_page_trylock(zhdr)); + __release_z3fold_page(zhdr, true); +} + +static void free_pages_work(struct work_struct *w) +{ + struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work); + + spin_lock(&pool->stale_lock); + while (!list_empty(&pool->stale)) { + struct z3fold_header *zhdr = list_first_entry(&pool->stale, + struct z3fold_header, buddy); + struct page *page = virt_to_page(zhdr); + + list_del(&zhdr->buddy); + if (WARN_ON(!test_bit(PAGE_STALE, &page->private))) + continue; + spin_unlock(&pool->stale_lock); + cancel_work_sync(&zhdr->work); + free_z3fold_page(page); + cond_resched(); + spin_lock(&pool->stale_lock); + } + spin_unlock(&pool->stale_lock); +} + +/* + * Returns the number of free chunks in a z3fold page. + * NB: can't be used with HEADLESS pages. + */ +static int num_free_chunks(struct z3fold_header *zhdr) +{ + int nfree; + /* + * If there is a middle object, pick up the bigger free space + * either before or after it. Otherwise just subtract the number + * of chunks occupied by the first and the last objects. + */ + if (zhdr->middle_chunks != 0) { + int nfree_before = zhdr->first_chunks ? + 0 : zhdr->start_middle - ZHDR_CHUNKS; + int nfree_after = zhdr->last_chunks ? 
+ 0 : TOTAL_CHUNKS - + (zhdr->start_middle + zhdr->middle_chunks); + nfree = max(nfree_before, nfree_after); + } else + nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; + return nfree; +} + +static inline void *mchunk_memmove(struct z3fold_header *zhdr, + unsigned short dst_chunk) +{ + void *beg = zhdr; + return memmove(beg + (dst_chunk << CHUNK_SHIFT), + beg + (zhdr->start_middle << CHUNK_SHIFT), + zhdr->middle_chunks << CHUNK_SHIFT); +} + +#define BIG_CHUNK_GAP 3 +/* Has to be called with lock held */ +static int z3fold_compact_page(struct z3fold_header *zhdr) +{ + struct page *page = virt_to_page(zhdr); + + if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private)) + return 0; /* can't move middle chunk, it's used */ + + if (zhdr->middle_chunks == 0) + return 0; /* nothing to compact */ + + if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { + /* move to the beginning */ + mchunk_memmove(zhdr, ZHDR_CHUNKS); + zhdr->first_chunks = zhdr->middle_chunks; + zhdr->middle_chunks = 0; + zhdr->start_middle = 0; + zhdr->first_num++; + return 1; + } + + /* + * moving data is expensive, so let's only do that if + * there's substantial gain (at least BIG_CHUNK_GAP chunks) + */ + if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 && + zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >= + BIG_CHUNK_GAP) { + mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS); + zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; + return 1; + } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 && + TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle + + zhdr->middle_chunks) >= + BIG_CHUNK_GAP) { + unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks - + zhdr->middle_chunks; + mchunk_memmove(zhdr, new_start); + zhdr->start_middle = new_start; + return 1; + } + + return 0; +} + +static void do_compact_page(struct z3fold_header *zhdr, bool locked) +{ + struct z3fold_pool *pool = zhdr->pool; + struct page *page; + struct list_head *unbuddied; + int fchunks; 
+ + page = virt_to_page(zhdr); + if (locked) + WARN_ON(z3fold_page_trylock(zhdr)); + else + z3fold_page_lock(zhdr); + if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) { + z3fold_page_unlock(zhdr); + return; + } + spin_lock(&pool->lock); + list_del_init(&zhdr->buddy); + spin_unlock(&pool->lock); + + if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) { + atomic64_dec(&pool->pages_nr); + return; + } + + z3fold_compact_page(zhdr); + unbuddied = get_cpu_ptr(pool->unbuddied); + fchunks = num_free_chunks(zhdr); + if (fchunks < NCHUNKS && + (!zhdr->first_chunks || !zhdr->middle_chunks || + !zhdr->last_chunks)) { + /* the page's not completely free and it's unbuddied */ + spin_lock(&pool->lock); + list_add(&zhdr->buddy, &unbuddied[fchunks]); + spin_unlock(&pool->lock); + zhdr->cpu = smp_processor_id(); + } + put_cpu_ptr(pool->unbuddied); + z3fold_page_unlock(zhdr); +} + +static void compact_page_work(struct work_struct *w) +{ + struct z3fold_header *zhdr = container_of(w, struct z3fold_header, + work); + + do_compact_page(zhdr, false); +} + + +/* + * API Functions + */ + +/** + * z3fold_create_pool() - create a new z3fold pool + * @name: pool name + * @gfp: gfp flags when allocating the z3fold pool structure + * @ops: user-defined operations for the z3fold pool + * + * Return: pointer to the new z3fold pool or NULL if the metadata allocation + * failed. 
+ */ +static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp, + const struct z3fold_ops *ops) +{ + struct z3fold_pool *pool = NULL; + int i, cpu; + + pool = kzalloc(sizeof(struct z3fold_pool), gfp); + if (!pool) + goto out; + spin_lock_init(&pool->lock); + spin_lock_init(&pool->stale_lock); + pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2); + for_each_possible_cpu(cpu) { + struct list_head *unbuddied = + per_cpu_ptr(pool->unbuddied, cpu); + for_each_unbuddied_list(i, 0) + INIT_LIST_HEAD(&unbuddied[i]); + } + INIT_LIST_HEAD(&pool->lru); + INIT_LIST_HEAD(&pool->stale); + atomic64_set(&pool->pages_nr, 0); + pool->name = name; + pool->compact_wq = create_singlethread_workqueue(pool->name); + if (!pool->compact_wq) + goto out; + pool->release_wq = create_singlethread_workqueue(pool->name); + if (!pool->release_wq) + goto out_wq; + INIT_WORK(&pool->work, free_pages_work); + pool->ops = ops; + return pool; + +out_wq: + destroy_workqueue(pool->compact_wq); +out: + kfree(pool); + return NULL; +} + +/** + * z3fold_destroy_pool() - destroys an existing z3fold pool + * @pool: the z3fold pool to be destroyed + * + * The pool should be emptied before this function is called. + */ +static void z3fold_destroy_pool(struct z3fold_pool *pool) +{ + destroy_workqueue(pool->release_wq); + destroy_workqueue(pool->compact_wq); + kfree(pool); +} + +/** + * z3fold_alloc() - allocates a region of a given size + * @pool: z3fold pool from which to allocate + * @size: size in bytes of the desired allocation + * @gfp: gfp flags used if the pool needs to grow + * @handle: handle of the new allocation + * + * This function will attempt to find a free region in the pool large enough to + * satisfy the allocation request. A search of the unbuddied lists is + * performed first. If no suitable free region is found, then a new page is + * allocated and added to the pool to satisfy the request. 
+ * + * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used + * as z3fold pool pages. + * + * Return: 0 if success and handle is set, otherwise -EINVAL if the size or + * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate + * a new page. + */ +static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, + unsigned long *handle) +{ + int chunks = 0, i, freechunks; + struct z3fold_header *zhdr = NULL; + struct page *page = NULL; + enum buddy bud; + bool can_sleep = (gfp & __GFP_RECLAIM) == __GFP_RECLAIM; + + if (!size || (gfp & __GFP_HIGHMEM)) + return -EINVAL; + + if (size > PAGE_SIZE) + return -ENOSPC; + + if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) + bud = HEADLESS; + else { + struct list_head *unbuddied; + chunks = size_to_chunks(size); + +lookup: + /* First, try to find an unbuddied z3fold page. */ + unbuddied = get_cpu_ptr(pool->unbuddied); + for_each_unbuddied_list(i, chunks) { + struct list_head *l = &unbuddied[i]; + + zhdr = list_first_entry_or_null(READ_ONCE(l), + struct z3fold_header, buddy); + + if (!zhdr) + continue; + + /* Re-check under lock. 
*/ + spin_lock(&pool->lock); + l = &unbuddied[i]; + if (unlikely(zhdr != list_first_entry(READ_ONCE(l), + struct z3fold_header, buddy)) || + !z3fold_page_trylock(zhdr)) { + spin_unlock(&pool->lock); + put_cpu_ptr(pool->unbuddied); + goto lookup; + } + list_del_init(&zhdr->buddy); + zhdr->cpu = -1; + spin_unlock(&pool->lock); + + page = virt_to_page(zhdr); + if (test_bit(NEEDS_COMPACTING, &page->private)) { + z3fold_page_unlock(zhdr); + zhdr = NULL; + put_cpu_ptr(pool->unbuddied); + if (can_sleep) + cond_resched(); + goto lookup; + } + + /* + * this page could not be removed from its unbuddied + * list while pool lock was held, and then we've taken + * page lock so kref_put could not be called before + * we got here, so it's safe to just call kref_get() + */ + kref_get(&zhdr->refcount); + break; + } + put_cpu_ptr(pool->unbuddied); + + if (zhdr) { + if (zhdr->first_chunks == 0) { + if (zhdr->middle_chunks != 0 && + chunks >= zhdr->start_middle) + bud = LAST; + else + bud = FIRST; + } else if (zhdr->last_chunks == 0) + bud = LAST; + else if (zhdr->middle_chunks == 0) + bud = MIDDLE; + else { + if (kref_put(&zhdr->refcount, + release_z3fold_page_locked)) + atomic64_dec(&pool->pages_nr); + else + z3fold_page_unlock(zhdr); + pr_err("No free chunks in unbuddied\n"); + WARN_ON(1); + goto lookup; + } + goto found; + } + bud = FIRST; + } + + page = NULL; + if (can_sleep) { + spin_lock(&pool->stale_lock); + zhdr = list_first_entry_or_null(&pool->stale, + struct z3fold_header, buddy); + /* + * Before allocating a page, let's see if we can take one from + * the stale pages list. 
cancel_work_sync() can sleep so we + * limit this case to the contexts where we can sleep + */ + if (zhdr) { + list_del(&zhdr->buddy); + spin_unlock(&pool->stale_lock); + cancel_work_sync(&zhdr->work); + page = virt_to_page(zhdr); + } else { + spin_unlock(&pool->stale_lock); + } + } + if (!page) + page = alloc_page(gfp); + + if (!page) + return -ENOMEM; + + atomic64_inc(&pool->pages_nr); + zhdr = init_z3fold_page(page, pool); + + if (bud == HEADLESS) { + set_bit(PAGE_HEADLESS, &page->private); + goto headless; + } + z3fold_page_lock(zhdr); + +found: + if (bud == FIRST) + zhdr->first_chunks = chunks; + else if (bud == LAST) + zhdr->last_chunks = chunks; + else { + zhdr->middle_chunks = chunks; + zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; + } + + if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 || + zhdr->middle_chunks == 0) { + struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied); + + /* Add to unbuddied list */ + freechunks = num_free_chunks(zhdr); + spin_lock(&pool->lock); + list_add(&zhdr->buddy, &unbuddied[freechunks]); + spin_unlock(&pool->lock); + zhdr->cpu = smp_processor_id(); + put_cpu_ptr(pool->unbuddied); + } + +headless: + spin_lock(&pool->lock); + /* Add/move z3fold page to beginning of LRU */ + if (!list_empty(&page->lru)) + list_del(&page->lru); + + list_add(&page->lru, &pool->lru); + + *handle = encode_handle(zhdr, bud); + spin_unlock(&pool->lock); + if (bud != HEADLESS) + z3fold_page_unlock(zhdr); + + return 0; +} + +/** + * z3fold_free() - frees the allocation associated with the given handle + * @pool: pool in which the allocation resided + * @handle: handle associated with the allocation returned by z3fold_alloc() + * + * In the case that the z3fold page in which the allocation resides is under + * reclaim, as indicated by the PG_reclaim flag being set, this function + * only sets the first|last_chunks to 0. The page is actually freed + * once both buddies are evicted (see z3fold_reclaim_page() below). 
+ */ +static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) +{ + struct z3fold_header *zhdr; + struct page *page; + enum buddy bud; + + zhdr = handle_to_z3fold_header(handle); + page = virt_to_page(zhdr); + + if (test_bit(PAGE_HEADLESS, &page->private)) { + /* HEADLESS page stored */ + bud = HEADLESS; + } else { + z3fold_page_lock(zhdr); + bud = handle_to_buddy(handle); + + switch (bud) { + case FIRST: + zhdr->first_chunks = 0; + break; + case MIDDLE: + zhdr->middle_chunks = 0; + zhdr->start_middle = 0; + break; + case LAST: + zhdr->last_chunks = 0; + break; + default: + pr_err("%s: unknown bud %d\n", __func__, bud); + WARN_ON(1); + z3fold_page_unlock(zhdr); + return; + } + } + + if (bud == HEADLESS) { + spin_lock(&pool->lock); + list_del(&page->lru); + spin_unlock(&pool->lock); + free_z3fold_page(page); + atomic64_dec(&pool->pages_nr); + return; + } + + if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) { + atomic64_dec(&pool->pages_nr); + return; + } + if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) { + z3fold_page_unlock(zhdr); + return; + } + if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) { + spin_lock(&pool->lock); + list_del_init(&zhdr->buddy); + spin_unlock(&pool->lock); + zhdr->cpu = -1; + kref_get(&zhdr->refcount); + do_compact_page(zhdr, true); + return; + } + kref_get(&zhdr->refcount); + queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work); + z3fold_page_unlock(zhdr); +} + +/** + * z3fold_reclaim_page() - evicts allocations from a pool page and frees it + * @pool: pool from which a page will attempt to be evicted + * @retires: number of pages on the LRU list for which eviction will + * be attempted before failing + * + * z3fold reclaim is different from normal system reclaim in that it is done + * from the bottom, up. This is because only the bottom layer, z3fold, has + * information on how the allocations are organized within each z3fold page. 
+ * This has the potential to create interesting locking situations between + * z3fold and the user, however. + * + * To avoid these, this is how z3fold_reclaim_page() should be called: + + * The user detects a page should be reclaimed and calls z3fold_reclaim_page(). + * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and + * call the user-defined eviction handler with the pool and handle as + * arguments. + * + * If the handle can not be evicted, the eviction handler should return + * non-zero. z3fold_reclaim_page() will add the z3fold page back to the + * appropriate list and try the next z3fold page on the LRU up to + * a user defined number of retries. + * + * If the handle is successfully evicted, the eviction handler should + * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free() + * contains logic to delay freeing the page if the page is under reclaim, + * as indicated by the setting of the PG_reclaim flag on the underlying page. + * + * If all buddies in the z3fold page are successfully evicted, then the + * z3fold page can be freed. + * + * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are + * no pages to evict or an eviction handler is not registered, -EAGAIN if + * the retry limit was hit. 
+ */ +static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) +{ + int i, ret = 0; + struct z3fold_header *zhdr = NULL; + struct page *page = NULL; + struct list_head *pos; + unsigned long first_handle = 0, middle_handle = 0, last_handle = 0; + + spin_lock(&pool->lock); + if (!pool->ops || !pool->ops->evict || retries == 0) { + spin_unlock(&pool->lock); + return -EINVAL; + } + for (i = 0; i < retries; i++) { + if (list_empty(&pool->lru)) { + spin_unlock(&pool->lock); + return -EINVAL; + } + list_for_each_prev(pos, &pool->lru) { + page = list_entry(pos, struct page, lru); + if (test_bit(PAGE_HEADLESS, &page->private)) + /* candidate found */ + break; + + zhdr = page_address(page); + if (!z3fold_page_trylock(zhdr)) + continue; /* can't evict at this point */ + kref_get(&zhdr->refcount); + list_del_init(&zhdr->buddy); + zhdr->cpu = -1; + } + + list_del_init(&page->lru); + spin_unlock(&pool->lock); + + if (!test_bit(PAGE_HEADLESS, &page->private)) { + /* + * We need encode the handles before unlocking, since + * we can race with free that will set + * (first|last)_chunks to 0 + */ + first_handle = 0; + last_handle = 0; + middle_handle = 0; + if (zhdr->first_chunks) + first_handle = encode_handle(zhdr, FIRST); + if (zhdr->middle_chunks) + middle_handle = encode_handle(zhdr, MIDDLE); + if (zhdr->last_chunks) + last_handle = encode_handle(zhdr, LAST); + /* + * it's safe to unlock here because we hold a + * reference to this page + */ + z3fold_page_unlock(zhdr); + } else { + first_handle = encode_handle(zhdr, HEADLESS); + last_handle = middle_handle = 0; + } + + /* Issue the eviction callback(s) */ + if (middle_handle) { + ret = pool->ops->evict(pool, middle_handle); + if (ret) + goto next; + } + if (first_handle) { + ret = pool->ops->evict(pool, first_handle); + if (ret) + goto next; + } + if (last_handle) { + ret = pool->ops->evict(pool, last_handle); + if (ret) + goto next; + } +next: + spin_lock(&pool->lock); + if (test_bit(PAGE_HEADLESS, 
&page->private)) { + if (ret == 0) { + spin_unlock(&pool->lock); + free_z3fold_page(page); + return 0; + } + } else if (kref_put(&zhdr->refcount, release_z3fold_page)) { + atomic64_dec(&pool->pages_nr); + spin_unlock(&pool->lock); + return 0; + } + + /* + * Add to the beginning of LRU. + * Pool lock has to be kept here to ensure the page has + * not already been released + */ + list_add(&page->lru, &pool->lru); + } + spin_unlock(&pool->lock); + return -EAGAIN; +} + +/** + * z3fold_map() - maps the allocation associated with the given handle + * @pool: pool in which the allocation resides + * @handle: handle associated with the allocation to be mapped + * + * Extracts the buddy number from handle and constructs the pointer to the + * correct starting chunk within the page. + * + * Returns: a pointer to the mapped allocation + */ +static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) +{ + struct z3fold_header *zhdr; + struct page *page; + void *addr; + enum buddy buddy; + + zhdr = handle_to_z3fold_header(handle); + addr = zhdr; + page = virt_to_page(zhdr); + + if (test_bit(PAGE_HEADLESS, &page->private)) + goto out; + + z3fold_page_lock(zhdr); + buddy = handle_to_buddy(handle); + switch (buddy) { + case FIRST: + addr += ZHDR_SIZE_ALIGNED; + break; + case MIDDLE: + addr += zhdr->start_middle << CHUNK_SHIFT; + set_bit(MIDDLE_CHUNK_MAPPED, &page->private); + break; + case LAST: + addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); + break; + default: + pr_err("unknown buddy id %d\n", buddy); + WARN_ON(1); + addr = NULL; + break; + } + + z3fold_page_unlock(zhdr); +out: + return addr; +} + +/** + * z3fold_unmap() - unmaps the allocation associated with the given handle + * @pool: pool in which the allocation resides + * @handle: handle associated with the allocation to be unmapped + */ +static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle) +{ + struct z3fold_header *zhdr; + struct page *page; + enum buddy buddy; + + zhdr = 
handle_to_z3fold_header(handle); + page = virt_to_page(zhdr); + + if (test_bit(PAGE_HEADLESS, &page->private)) + return; + + z3fold_page_lock(zhdr); + buddy = handle_to_buddy(handle); + if (buddy == MIDDLE) + clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); + z3fold_page_unlock(zhdr); +} + +/** + * z3fold_get_pool_size() - gets the z3fold pool size in pages + * @pool: pool whose size is being queried + * + * Returns: size in pages of the given pool. + */ +static u64 z3fold_get_pool_size(struct z3fold_pool *pool) +{ + return atomic64_read(&pool->pages_nr); +} + +/***************** + * zpool + ****************/ + +static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle) +{ + if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict) + return pool->zpool_ops->evict(pool->zpool, handle); + else + return -ENOENT; +} + +static const struct z3fold_ops z3fold_zpool_ops = { + .evict = z3fold_zpool_evict +}; + +static void *z3fold_zpool_create(const char *name, gfp_t gfp, + const struct zpool_ops *zpool_ops, + struct zpool *zpool) +{ + struct z3fold_pool *pool; + + pool = z3fold_create_pool(name, gfp, + zpool_ops ? 
&z3fold_zpool_ops : NULL); + if (pool) { + pool->zpool = zpool; + pool->zpool_ops = zpool_ops; + pool->name = name; + } + return pool; +} + +static void z3fold_zpool_destroy(void *pool) +{ + z3fold_destroy_pool(pool); +} + +static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, + unsigned long *handle) +{ + return z3fold_alloc(pool, size, gfp, handle); +} +static void z3fold_zpool_free(void *pool, unsigned long handle) +{ + z3fold_free(pool, handle); +} + +static int z3fold_zpool_shrink(void *pool, unsigned int pages, + unsigned int *reclaimed) +{ + unsigned int total = 0; + int ret = -EINVAL; + + while (total < pages) { + ret = z3fold_reclaim_page(pool, 8); + if (ret < 0) + break; + total++; + } + + if (reclaimed) + *reclaimed = total; + + return ret; +} + +static void *z3fold_zpool_map(void *pool, unsigned long handle, + enum zpool_mapmode mm) +{ + return z3fold_map(pool, handle); +} +static void z3fold_zpool_unmap(void *pool, unsigned long handle) +{ + z3fold_unmap(pool, handle); +} + +static u64 z3fold_zpool_total_size(void *pool) +{ + return z3fold_get_pool_size(pool) * PAGE_SIZE; +} + +static struct zpool_driver z3fold_zpool_driver = { + .type = "z3fold", + .owner = THIS_MODULE, + .create = z3fold_zpool_create, + .destroy = z3fold_zpool_destroy, + .malloc = z3fold_zpool_malloc, + .free = z3fold_zpool_free, + .shrink = z3fold_zpool_shrink, + .map = z3fold_zpool_map, + .unmap = z3fold_zpool_unmap, + .total_size = z3fold_zpool_total_size, +}; + +MODULE_ALIAS("zpool-z3fold"); + +static int __init init_z3fold(void) +{ + /* Make sure the z3fold header is not larger than the page size */ + BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE); + zpool_register_driver(&z3fold_zpool_driver); + + return 0; +} + +static void __exit exit_z3fold(void) +{ + zpool_unregister_driver(&z3fold_zpool_driver); +} + +module_init(init_z3fold); +module_exit(exit_z3fold); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Vitaly Wool "); +MODULE_DESCRIPTION("3-Fold Allocator for Compressed 
Pages"); diff --git a/mm/zpool.c b/mm/zpool.c index fd3ff719c32c..58aef91eb1e1 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -6,6 +6,11 @@ * This is a common frontend for memory storage pool implementations. * Typically, this is used to store compressed memory. */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2015 Sony Mobile Communications Inc, + * and licensed under the license of the file. + */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -355,6 +360,32 @@ u64 zpool_get_total_size(struct zpool *zpool) return zpool->driver->total_size(zpool->pool); } +/** + * zpool_compact() - trigger backend-specific pool compaction + * @pool The zpool to compact + * + * This returns the total size in bytes of the pool. + * + * Returns: Number of pages compacted + */ +unsigned long zpool_compact(struct zpool *zpool) +{ + return zpool->driver->compact ? + zpool->driver->compact(zpool->pool) : 0; +} + +/** + * zpool_get_num_compacted() - get the number of migrated/compacted pages + * @stats stats to fill in + * + * Returns: the total number of migrated pages for the pool + */ +unsigned long zpool_get_num_compacted(struct zpool *zpool) +{ + return zpool->driver->get_num_compacted ? + zpool->driver->get_num_compacted(zpool->pool) : 0; +} + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dan Streetman "); MODULE_DESCRIPTION("Common API for compressed memory storage"); diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 3f1b584bd5d0..6e7e1d8a2bad 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -10,6 +10,11 @@ * Released under the terms of 3-clause BSD License * Released under the terms of GNU General Public License Version 2.0 */ +/* + * NOTE: This file has been modified by Sony Mobile Communications Inc. + * Modifications are Copyright (c) 2015 Sony Mobile Communications Inc, + * and licensed under the license of the file. 
+ */ /* * Following is how we use various fields and flags of underlying @@ -450,6 +455,20 @@ static u64 zs_zpool_total_size(void *pool) return zs_get_total_pages(pool) << PAGE_SHIFT; } +static unsigned long zs_zpool_compact(void *pool) +{ + return zs_compact(pool); +} + +static unsigned long zs_zpool_get_compacted(void *pool) +{ + struct zs_pool_stats stats; + + zs_pool_stats(pool, &stats); + return stats.pages_compacted; +} + + static struct zpool_driver zs_zpool_driver = { .type = "zsmalloc", .owner = THIS_MODULE, @@ -461,6 +480,8 @@ static struct zpool_driver zs_zpool_driver = { .map = zs_zpool_map, .unmap = zs_zpool_unmap, .total_size = zs_zpool_total_size, + .compact = zs_zpool_compact, + .get_num_compacted = zs_zpool_get_compacted, }; MODULE_ALIAS("zpool-zsmalloc"); diff --git a/net/802/garp.c b/net/802/garp.c index 5239b8f244e7..b38ee6dcba45 100644 --- a/net/802/garp.c +++ b/net/802/garp.c @@ -206,19 +206,6 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr kfree(attr); } -static void garp_attr_destroy_all(struct garp_applicant *app) -{ - struct rb_node *node, *next; - struct garp_attr *attr; - - for (node = rb_first(&app->gid); - next = node ? 
rb_next(node) : NULL, node != NULL; - node = next) { - attr = rb_entry(node, struct garp_attr, node); - garp_attr_destroy(app, attr); - } -} - static int garp_pdu_init(struct garp_applicant *app) { struct sk_buff *skb; @@ -625,7 +612,6 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl spin_lock_bh(&app->lock); garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU); - garp_attr_destroy_all(app); garp_pdu_queue(app); spin_unlock_bh(&app->lock); diff --git a/net/802/mrp.c b/net/802/mrp.c index 4ee3af3d400b..72db2785ef2c 100644 --- a/net/802/mrp.c +++ b/net/802/mrp.c @@ -295,19 +295,6 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr) kfree(attr); } -static void mrp_attr_destroy_all(struct mrp_applicant *app) -{ - struct rb_node *node, *next; - struct mrp_attr *attr; - - for (node = rb_first(&app->mad); - next = node ? rb_next(node) : NULL, node != NULL; - node = next) { - attr = rb_entry(node, struct mrp_attr, node); - mrp_attr_destroy(app, attr); - } -} - static int mrp_pdu_init(struct mrp_applicant *app) { struct sk_buff *skb; @@ -913,7 +900,6 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl) spin_lock_bh(&app->lock); mrp_mad_event(app, MRP_EVENT_TX); - mrp_attr_destroy_all(app); mrp_pdu_queue(app); spin_unlock_bh(&app->lock); diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 252a4c22898e..5892bd1457d4 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -605,7 +605,7 @@ static int p9_virtio_probe(struct virtio_device *vdev) chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL); if (!chan->vc_wq) { err = -ENOMEM; - goto out_remove_file; + goto out_free_tag; } init_waitqueue_head(chan->vc_wq); chan->ring_bufs_avail = 1; @@ -623,8 +623,6 @@ static int p9_virtio_probe(struct virtio_device *vdev) return 0; -out_remove_file: - sysfs_remove_file(&vdev->dev.kobj, &dev_attr_mount_tag.attr); out_free_tag: kfree(tag); out_free_vq: diff --git 
a/net/Makefile b/net/Makefile index af7d4d7e4dad..0d45d4c93f62 100644 --- a/net/Makefile +++ b/net/Makefile @@ -17,7 +17,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/ obj-$(CONFIG_WIREGUARD) += wireguard/ obj-$(CONFIG_INET) += ipv4/ obj-$(CONFIG_XFRM) += xfrm/ -obj-$(CONFIG_UNIX_SCM) += unix/ +obj-$(CONFIG_UNIX) += unix/ obj-$(CONFIG_NET) += ipv6/ obj-$(CONFIG_PACKET) += packet/ obj-$(CONFIG_NET_KEY) += key/ diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 1048cddcc9a3..ace94170f55e 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1575,8 +1575,8 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) struct sk_buff *skb; struct net_device *dev; struct ddpehdr *ddp; - int size, hard_header_len; - struct atalk_route *rt, *rt_lo = NULL; + int size; + struct atalk_route *rt; int err; if (flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) @@ -1639,22 +1639,7 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n", sk, size, dev->name); - hard_header_len = dev->hard_header_len; - /* Leave room for loopback hardware header if necessary */ - if (usat->sat_addr.s_node == ATADDR_BCAST && - (dev->flags & IFF_LOOPBACK || !(rt->flags & RTF_GATEWAY))) { - struct atalk_addr at_lo; - - at_lo.s_node = 0; - at_lo.s_net = 0; - - rt_lo = atrtr_find(&at_lo); - - if (rt_lo && rt_lo->dev->hard_header_len > hard_header_len) - hard_header_len = rt_lo->dev->hard_header_len; - } - - size += hard_header_len; + size += dev->hard_header_len; release_sock(sk); skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err); lock_sock(sk); @@ -1662,7 +1647,7 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) goto out; skb_reserve(skb, ddp_dl->header_length); - skb_reserve(skb, hard_header_len); + skb_reserve(skb, dev->hard_header_len); skb->dev = dev; SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk); @@ -1713,12 +1698,18 @@ static int atalk_sendmsg(struct 
socket *sock, struct msghdr *msg, size_t len) /* loop back */ skb_orphan(skb); if (ddp->deh_dnode == ATADDR_BCAST) { - if (!rt_lo) { + struct atalk_addr at_lo; + + at_lo.s_node = 0; + at_lo.s_net = 0; + + rt = atrtr_find(&at_lo); + if (!rt) { kfree_skb(skb); err = -ENETUNREACH; goto out; } - dev = rt_lo->dev; + dev = rt->dev; skb->dev = dev; } ddp_dl->request(ddp_dl, skb, dev->dev_addr); diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index c4ef1be59cb1..64fede18aa33 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -80,7 +80,6 @@ static void ax25_kill_by_device(struct net_device *dev) { ax25_dev *ax25_dev; ax25_cb *s; - struct sock *sk; if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) return; @@ -89,15 +88,11 @@ static void ax25_kill_by_device(struct net_device *dev) again: ax25_for_each(s, &ax25_list) { if (s->ax25_dev == ax25_dev) { - sk = s->sk; - sock_hold(sk); - spin_unlock_bh(&ax25_list_lock); - lock_sock(sk); s->ax25_dev = NULL; - release_sock(sk); + spin_unlock_bh(&ax25_list_lock); ax25_disconnect(s, ENETUNREACH); spin_lock_bh(&ax25_list_lock); - sock_put(sk); + /* The entry could have been deleted from the * list meanwhile and thus the next pointer is * no longer valid. 
Play it safe and restart diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 6f8d2fe114f6..caea5bb38d4b 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -526,10 +526,8 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet) if (WARN_ON(!forw_packet->if_outgoing)) goto out; - if (forw_packet->if_outgoing->soft_iface != soft_iface) { - pr_warn("%s: soft interface switch for queued OGM\n", __func__); + if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface)) goto out; - } if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE) goto out; diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 355a18d373e6..1267cbb1a329 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1346,14 +1346,10 @@ int batadv_bla_init(struct batadv_priv *bat_priv) return 0; bat_priv->bla.claim_hash = batadv_hash_new(128); - if (!bat_priv->bla.claim_hash) - return -ENOMEM; - bat_priv->bla.backbone_hash = batadv_hash_new(32); - if (!bat_priv->bla.backbone_hash) { - batadv_hash_destroy(bat_priv->bla.claim_hash); + + if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash) return -ENOMEM; - } batadv_hash_set_lock_class(bat_priv->bla.claim_hash, &batadv_claim_hash_lock_class_key); @@ -1370,32 +1366,31 @@ int batadv_bla_init(struct batadv_priv *bat_priv) } /** - * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup. + * batadv_bla_check_bcast_duplist * @bat_priv: the bat priv with all the soft interface information - * @skb: contains the multicast packet to be checked - * @payload_ptr: pointer to position inside the head buffer of the skb - * marking the start of the data to be CRC'ed - * @orig: originator mac address, NULL if unknown + * @skb: contains the bcast_packet to be checked * - * Check if it is on our broadcast list. 
Another gateway might have sent the - * same packet because it is connected to the same backbone, so we have to - * remove this duplicate. + * check if it is on our broadcast list. Another gateway might + * have sent the same packet because it is connected to the same backbone, + * so we have to remove this duplicate. * * This is performed by checking the CRC, which will tell us * with a good chance that it is the same packet. If it is furthermore * sent by another host, drop it. We allow equal packets from * the same host however as this might be intended. */ -static int batadv_bla_check_duplist(struct batadv_priv *bat_priv, - struct sk_buff *skb, u8 *payload_ptr, - const u8 *orig) +int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb) { int i, curr, ret = 0; __be32 crc; + struct batadv_bcast_packet *bcast_packet; struct batadv_bcast_duplist_entry *entry; + bcast_packet = (struct batadv_bcast_packet *)skb->data; + /* calculate the crc ... */ - crc = batadv_skb_crc32(skb, payload_ptr); + crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1)); spin_lock_bh(&bat_priv->bla.bcast_duplist_lock); @@ -1414,21 +1409,8 @@ static int batadv_bla_check_duplist(struct batadv_priv *bat_priv, if (entry->crc != crc) continue; - /* are the originators both known and not anonymous? */ - if (orig && !is_zero_ether_addr(orig) && - !is_zero_ether_addr(entry->orig)) { - /* If known, check if the new frame came from - * the same originator: - * We are safe to take identical frames from the - * same orig, if known, as multiplications in - * the mesh are detected via the (orig, seqno) pair. - * So we can be a bit more liberal here and allow - * identical frames from the same orig which the source - * host might have sent multiple times on purpose. - */ - if (batadv_compare_eth(entry->orig, orig)) - continue; - } + if (batadv_compare_eth(entry->orig, bcast_packet->orig)) + continue; /* this entry seems to match: same crc, not too old, * and from another gw. 
therefore return 1 to forbid it. @@ -1444,14 +1426,7 @@ static int batadv_bla_check_duplist(struct batadv_priv *bat_priv, entry = &bat_priv->bla.bcast_duplist[curr]; entry->crc = crc; entry->entrytime = jiffies; - - /* known originator */ - if (orig) - ether_addr_copy(entry->orig, orig); - /* anonymous originator */ - else - eth_zero_addr(entry->orig); - + ether_addr_copy(entry->orig, bcast_packet->orig); bat_priv->bla.bcast_duplist_curr = curr; out: @@ -1460,48 +1435,6 @@ out: return ret; } -/** - * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup. - * @bat_priv: the bat priv with all the soft interface information - * @skb: contains the multicast packet to be checked, decapsulated from a - * unicast_packet - * - * Check if it is on our broadcast list. Another gateway might have sent the - * same packet because it is connected to the same backbone, so we have to - * remove this duplicate. - * - * Return: true if a packet is in the duplicate list, false otherwise. - */ -static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv, - struct sk_buff *skb) -{ - return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL); -} - -/** - * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup. - * @bat_priv: the bat priv with all the soft interface information - * @skb: contains the bcast_packet to be checked - * - * Check if it is on our broadcast list. Another gateway might have sent the - * same packet because it is connected to the same backbone, so we have to - * remove this duplicate. - * - * Return: true if a packet is in the duplicate list, false otherwise. 
- */ -int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, - struct sk_buff *skb) -{ - struct batadv_bcast_packet *bcast_packet; - u8 *payload_ptr; - - bcast_packet = (struct batadv_bcast_packet *)skb->data; - payload_ptr = (u8 *)(bcast_packet + 1); - - return batadv_bla_check_duplist(bat_priv, skb, payload_ptr, - bcast_packet->orig); -} - /** * batadv_bla_is_backbone_gw_orig * @bat_priv: the bat priv with all the soft interface information @@ -1605,7 +1538,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv) * @bat_priv: the bat priv with all the soft interface information * @skb: the frame to be checked * @vid: the VLAN ID of the frame - * @packet_type: the batman packet type this frame came in + * @is_bcast: the packet came in a broadcast packet type. * * bla_rx avoidance checks if: * * we have to race for a claim @@ -1616,7 +1549,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv) * process the skb. */ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid, int packet_type) + unsigned short vid, bool is_bcast) { struct batadv_bla_backbone_gw *backbone_gw; struct ethhdr *ethhdr; @@ -1635,32 +1568,9 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, goto allow; if (unlikely(atomic_read(&bat_priv->bla.num_requests))) - /* don't allow multicast packets while requests are in flight */ - if (is_multicast_ether_addr(ethhdr->h_dest)) - /* Both broadcast flooding or multicast-via-unicasts - * delivery might send to multiple backbone gateways - * sharing the same LAN and therefore need to coordinate - * which backbone gateway forwards into the LAN, - * by claiming the payload source address. - * - * Broadcast flooding and multicast-via-unicasts - * delivery use the following two batman packet types. - * Note: explicitly exclude BATADV_UNICAST_4ADDR, - * as the DHCP gateway feature will send explicitly - * to only one BLA gateway, so the claiming process - * should be avoided there. 
- */ - if (packet_type == BATADV_BCAST || - packet_type == BATADV_UNICAST) - goto handled; - - /* potential duplicates from foreign BLA backbone gateways via - * multicast-in-unicast packets - */ - if (is_multicast_ether_addr(ethhdr->h_dest) && - packet_type == BATADV_UNICAST && - batadv_bla_check_ucast_duplist(bat_priv, skb)) - goto handled; + /* don't allow broadcasts while requests are in flight */ + if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) + goto handled; ether_addr_copy(search_claim.addr, ethhdr->h_source); search_claim.vid = vid; @@ -1688,14 +1598,13 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, goto allow; } - /* if it is a multicast ... */ - if (is_multicast_ether_addr(ethhdr->h_dest) && - (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) { + /* if it is a broadcast ... */ + if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) { /* ... drop it. the responsible gateway is in charge. * - * We need to check packet type because with the gateway + * We need to check is_bcast because with the gateway * feature, broadcasts (like DHCP requests) may be sent - * using a unicast 4 address packet type. See comment above. + * using a unicast packet type. 
*/ goto handled; } else { diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index d1553c46df8c..025152b34282 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -27,7 +27,7 @@ struct sk_buff; #ifdef CONFIG_BATMAN_ADV_BLA int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid, int packet_type); + unsigned short vid, bool is_bcast); int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid); int batadv_bla_is_backbone_gw(struct sk_buff *skb, @@ -50,7 +50,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv); static inline int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid, - int packet_type) + bool is_bcast) { return 0; } diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c index b905763dc2e7..b2ef03a3a2d4 100644 --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c @@ -214,7 +214,6 @@ static const struct file_operations batadv_log_fops = { .read = batadv_log_read, .poll = batadv_log_poll, .llseek = no_llseek, - .owner = THIS_MODULE, }; static int batadv_debug_log_setup(struct batadv_priv *bat_priv) diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 371f50804fc2..9751b207b01f 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -394,10 +394,9 @@ out: /** * batadv_frag_create - create a fragment from skb - * @net_dev: outgoing device for fragment * @skb: skb to create fragment from * @frag_head: header to use in new fragment - * @fragment_size: size of new fragment + * @mtu: size of new fragment * * Split the passed skb into two fragments: A new one with size matching the * passed mtu and the old one with the rest. The new skb contains data from the @@ -405,25 +404,22 @@ out: * * Returns the new fragment, NULL on error. 
*/ -static struct sk_buff *batadv_frag_create(struct net_device *net_dev, - struct sk_buff *skb, +static struct sk_buff *batadv_frag_create(struct sk_buff *skb, struct batadv_frag_packet *frag_head, - unsigned int fragment_size) + unsigned int mtu) { - unsigned int ll_reserved = LL_RESERVED_SPACE(net_dev); - unsigned int tailroom = net_dev->needed_tailroom; struct sk_buff *skb_fragment; unsigned header_size = sizeof(*frag_head); - unsigned mtu = fragment_size + header_size; + unsigned fragment_size = mtu - header_size; - skb_fragment = dev_alloc_skb(ll_reserved + mtu + tailroom); + skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN); if (!skb_fragment) goto err; skb->priority = TC_PRIO_CONTROL; /* Eat the last mtu-bytes of the skb */ - skb_reserve(skb_fragment, ll_reserved + header_size); + skb_reserve(skb_fragment, header_size + ETH_HLEN); skb_split(skb, skb_fragment, skb->len - fragment_size); /* Add the header */ @@ -446,14 +442,13 @@ bool batadv_frag_send_packet(struct sk_buff *skb, struct batadv_orig_node *orig_node, struct batadv_neigh_node *neigh_node) { - struct net_device *net_dev = neigh_node->if_incoming->net_dev; struct batadv_priv *bat_priv; struct batadv_hard_iface *primary_if = NULL; struct batadv_frag_packet frag_header; struct sk_buff *skb_fragment; - unsigned mtu = net_dev->mtu; + unsigned mtu = neigh_node->if_incoming->net_dev->mtu; unsigned header_size = sizeof(frag_header); - unsigned max_fragment_size, num_fragments; + unsigned max_fragment_size, max_packet_size; bool ret = false; /* To avoid merge and refragmentation at next-hops we never send @@ -461,15 +456,10 @@ bool batadv_frag_send_packet(struct sk_buff *skb, */ mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE); max_fragment_size = mtu - header_size; - - if (skb->len == 0 || max_fragment_size == 0) - goto out_err; - - num_fragments = (skb->len - 1) / max_fragment_size + 1; - max_fragment_size = (skb->len - 1) / num_fragments + 1; + max_packet_size = max_fragment_size * 
BATADV_FRAG_MAX_FRAGMENTS; /* Don't even try to fragment, if we need more than 16 fragments */ - if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) + if (skb->len > max_packet_size) goto out_err; bat_priv = orig_node->bat_priv; @@ -494,8 +484,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb, if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) goto out_err; - skb_fragment = batadv_frag_create(net_dev, skb, &frag_header, - max_fragment_size); + skb_fragment = batadv_frag_create(skb, &frag_header, mtu); if (!skb_fragment) goto out_err; @@ -507,13 +496,11 @@ bool batadv_frag_send_packet(struct sk_buff *skb, frag_header.no++; } - /* make sure that there is at least enough head for the fragmentation - * and ethernet headers - */ - if (skb_cow_head(skb, ETH_HLEN + header_size) < 0) + /* Make room for the fragment header. */ + if (batadv_skb_head_push(skb, header_size) < 0 || + pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) goto out_err; - skb_push(skb, header_size); memcpy(skb->data, &frag_header, header_size); /* Send the last fragment */ diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 0bd7c9e6c9a0..c59bbc327763 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -316,9 +316,6 @@ static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface) needed_headroom = lower_headroom + (lower_header_len - ETH_HLEN); needed_headroom += batadv_max_header_len(); - /* fragmentation headers don't strip the unicast/... 
header */ - needed_headroom += sizeof(struct batadv_frag_packet); - soft_iface->needed_headroom = needed_headroom; soft_iface->needed_tailroom = lower_tailroom; } diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 8ba7b86579d4..88cea5154113 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -159,34 +159,24 @@ int batadv_mesh_init(struct net_device *soft_iface) INIT_HLIST_HEAD(&bat_priv->softif_vlan_list); ret = batadv_originator_init(bat_priv); - if (ret < 0) { - atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING); - goto err_orig; - } + if (ret < 0) + goto err; ret = batadv_tt_init(bat_priv); - if (ret < 0) { - atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING); - goto err_tt; - } + if (ret < 0) + goto err; ret = batadv_bla_init(bat_priv); - if (ret < 0) { - atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING); - goto err_bla; - } + if (ret < 0) + goto err; ret = batadv_dat_init(bat_priv); - if (ret < 0) { - atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING); - goto err_dat; - } + if (ret < 0) + goto err; ret = batadv_nc_mesh_init(bat_priv); - if (ret < 0) { - atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING); - goto err_nc; - } + if (ret < 0) + goto err; batadv_gw_init(bat_priv); batadv_mcast_init(bat_priv); @@ -196,18 +186,8 @@ int batadv_mesh_init(struct net_device *soft_iface) return 0; -err_nc: - batadv_dat_free(bat_priv); -err_dat: - batadv_bla_free(bat_priv); -err_bla: - batadv_tt_free(bat_priv); -err_tt: - batadv_originator_free(bat_priv); -err_orig: - batadv_purge_outstanding_packets(bat_priv, NULL); - atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); - +err: + batadv_mesh_free(soft_iface); return ret; } diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index 44965f71ad73..8aa2d65df86f 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -44,9 +44,7 @@ #include #include -#include "bridge_loop_avoidance.h" #include "packet.h" -#include 
"send.h" #include "translation-table.h" /** @@ -807,35 +805,6 @@ void batadv_mcast_free(struct batadv_priv *bat_priv) batadv_mcast_mla_tt_retract(bat_priv, NULL); } -/** - * batadv_mcast_forw_send_orig() - send a multicast packet to an originator - * @bat_priv: the bat priv with all the soft interface information - * @skb: the multicast packet to send - * @vid: the vlan identifier - * @orig_node: the originator to send the packet to - * - * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise. - */ -int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv, - struct sk_buff *skb, - unsigned short vid, - struct batadv_orig_node *orig_node) -{ - /* Avoid sending multicast-in-unicast packets to other BLA - * gateways - they already got the frame from the LAN side - * we share with them. - * TODO: Refactor to take BLA into account earlier, to avoid - * reducing the mcast_fanout count. - */ - if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) { - dev_kfree_skb(skb); - return NET_XMIT_SUCCESS; - } - - return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0, - orig_node, vid); -} - /** * batadv_mcast_purge_orig - reset originator global mcast state modifications * @orig: the originator which is going to get purged diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h index dd83ef07e2f2..8f3cb04b9f13 100644 --- a/net/batman-adv/multicast.h +++ b/net/batman-adv/multicast.h @@ -44,11 +44,6 @@ enum batadv_forw_mode batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb, struct batadv_orig_node **mcast_single_orig); -int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv, - struct sk_buff *skb, - unsigned short vid, - struct batadv_orig_node *orig_node); - void batadv_mcast_init(struct batadv_priv *bat_priv); void batadv_mcast_free(struct batadv_priv *bat_priv); @@ -73,16 +68,6 @@ static inline int batadv_mcast_init(struct batadv_priv *bat_priv) return 0; } -static inline int 
-batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv, - struct sk_buff *skb, - unsigned short vid, - struct batadv_orig_node *orig_node) -{ - kfree_skb(skb); - return NET_XMIT_DROP; -} - static inline void batadv_mcast_free(struct batadv_priv *bat_priv) { } diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 9317d872b9c0..91de807a8f03 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -159,10 +159,8 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv) &batadv_nc_coding_hash_lock_class_key); bat_priv->nc.decoding_hash = batadv_hash_new(128); - if (!bat_priv->nc.decoding_hash) { - batadv_hash_destroy(bat_priv->nc.coding_hash); + if (!bat_priv->nc.decoding_hash) goto err; - } batadv_hash_set_lock_class(bat_priv->nc.decoding_hash, &batadv_nc_decoding_hash_lock_class_key); diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 5105e860d3aa..ff693887ea82 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -353,8 +353,9 @@ send: goto dropped; ret = batadv_send_skb_via_gw(bat_priv, skb, vid); } else if (mcast_single_orig) { - ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid, - mcast_single_orig); + ret = batadv_send_skb_unicast(bat_priv, skb, + BATADV_UNICAST, 0, + mcast_single_orig, vid); } else { if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb)) @@ -393,10 +394,10 @@ void batadv_interface_rx(struct net_device *soft_iface, struct vlan_ethhdr *vhdr; struct ethhdr *ethhdr; unsigned short vid; - int packet_type; + bool is_bcast; batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data; - packet_type = batadv_bcast_packet->packet_type; + is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST); /* check if enough space is available for pulling, and pull */ if (!pskb_may_pull(skb, hdr_size)) @@ -444,7 +445,7 @@ void batadv_interface_rx(struct net_device *soft_iface, /* Let the bridge loop avoidance check the packet. 
If will * not handle it, we can safely push it up. */ - if (batadv_bla_rx(bat_priv, skb, vid, packet_type)) + if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) goto out; if (orig_node) @@ -538,20 +539,15 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) struct batadv_softif_vlan *vlan; int err; - spin_lock_bh(&bat_priv->softif_vlan_list_lock); - vlan = batadv_softif_vlan_get(bat_priv, vid); if (vlan) { batadv_softif_vlan_free_ref(vlan); - spin_unlock_bh(&bat_priv->softif_vlan_list_lock); return -EEXIST; } vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); - if (!vlan) { - spin_unlock_bh(&bat_priv->softif_vlan_list_lock); + if (!vlan) return -ENOMEM; - } vlan->bat_priv = bat_priv; vlan->vid = vid; @@ -559,19 +555,16 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) atomic_set(&vlan->ap_isolation, 0); - hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); - spin_unlock_bh(&bat_priv->softif_vlan_list_lock); - - /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the - * sleeping behavior of the sysfs functions and the fs_reclaim lock - */ err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan); if (err) { - /* ref for the list */ - batadv_softif_vlan_free_ref(vlan); + kfree(vlan); return err; } + spin_lock_bh(&bat_priv->softif_vlan_list_lock); + hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); + spin_unlock_bh(&bat_priv->softif_vlan_list_lock); + /* add a new TT local entry. 
This one will be marked with the NOPURGE * flag */ diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 1e71e0c9b47b..06f366d234ff 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -871,7 +871,6 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv, tt_vlan->vid = htons(vlan->vid); tt_vlan->crc = htonl(vlan->tt.crc); - tt_vlan->reserved = 0; tt_vlan++; } @@ -1426,8 +1425,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, ether_addr_copy(common->addr, tt_addr); common->vid = vid; - if (!is_multicast_ether_addr(common->addr)) - common->flags = flags & (~BATADV_TT_SYNC_MASK); + common->flags = flags & (~BATADV_TT_SYNC_MASK); tt_global_entry->roam_at = 0; /* node must store current time in case of roaming. This is @@ -1490,8 +1488,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, * TT_CLIENT_WIFI, therefore they have to be copied in the * client entry */ - if (!is_multicast_ether_addr(common->addr)) - tt_global_entry->common.flags |= flags & (~BATADV_TT_SYNC_MASK); + tt_global_entry->common.flags |= flags & (~BATADV_TT_SYNC_MASK); /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only * one originator left in the list and we previously received a @@ -3835,10 +3832,8 @@ int batadv_tt_init(struct batadv_priv *bat_priv) return ret; ret = batadv_tt_global_init(bat_priv); - if (ret < 0) { - batadv_tt_local_table_free(bat_priv); + if (ret < 0) return ret; - } batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1, batadv_tt_tvlv_unicast_handler_v1, diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c index fcd819ffda10..8f918155685d 100644 --- a/net/bluetooth/a2mp.c +++ b/net/bluetooth/a2mp.c @@ -388,9 +388,9 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, hdev = hci_dev_get(req->id); if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) { struct a2mp_amp_assoc_rsp rsp; + rsp.id = req->id; 
memset(&rsp, 0, sizeof(rsp)); - rsp.id = req->id; if (tmp) { rsp.status = A2MP_STATUS_COLLISION_OCCURED; @@ -519,7 +519,6 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL); if (!assoc) { amp_ctrl_put(ctrl); - hci_dev_put(hdev); return -ENOMEM; } diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c index b01b43ab6f83..e32f34189007 100644 --- a/net/bluetooth/amp.c +++ b/net/bluetooth/amp.c @@ -305,9 +305,6 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev, struct hci_request req; int err = 0; - if (!mgr) - return; - cp.phy_handle = hcon->handle; cp.len_so_far = cpu_to_le16(0); cp.max_len = cpu_to_le16(hdev->amp_assoc_size); diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h index f6b9dc4e408f..c32638dddbf9 100644 --- a/net/bluetooth/cmtp/cmtp.h +++ b/net/bluetooth/cmtp/cmtp.h @@ -26,7 +26,7 @@ #include #include -#define BTNAMSIZ 21 +#define BTNAMSIZ 18 /* CMTP ioctl defines */ #define CMTPCONNADD _IOW('C', 200, int) diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 2133b53eb152..77f73bfa840b 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -392,11 +392,6 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) if (!(session->flags & BIT(CMTP_LOOPBACK))) { err = cmtp_attach_device(session); if (err < 0) { - /* Caller will call fput in case of failure, and so - * will cmtp_session kthread. 
- */ - get_file(session->sock->file); - atomic_inc(&session->terminate); wake_up_interruptible(sk_sleep(session->sock->sk)); up_write(&cmtp_session_sem); @@ -500,7 +495,9 @@ static int __init cmtp_init(void) { BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION); - return cmtp_init_sockets(); + cmtp_init_sockets(); + + return 0; } static void __exit cmtp_exit(void) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index f88076f55ce9..599b78ecf3e9 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -371,17 +371,12 @@ static int hci_req_sync(struct hci_dev *hdev, { int ret; + if (!test_bit(HCI_UP, &hdev->flags)) + return -ENETDOWN; + /* Serialize all requests */ hci_req_lock(hdev); - /* check the state after obtaing the lock to protect the HCI_UP - * against any races from hci_dev_do_close when the controller - * gets removed. - */ - if (test_bit(HCI_UP, &hdev->flags)) - ret = __hci_req_sync(hdev, req, opt, timeout); - else - ret = -ENETDOWN; - + ret = __hci_req_sync(hdev, req, opt, timeout); hci_req_unlock(hdev); return ret; @@ -1357,12 +1352,6 @@ int hci_inquiry(void __user *arg) goto done; } - /* Restrict maximum inquiry length to 60 seconds */ - if (ir.length > 60) { - err = -EINVAL; - goto done; - } - hci_dev_lock(hdev); if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { @@ -1383,10 +1372,8 @@ int hci_inquiry(void __user *arg) * cleared). If it is interrupted by a signal, return -EINTR. 
*/ if (wait_on_bit(&hdev->flags, HCI_INQUIRY, - TASK_INTERRUPTIBLE)) { - err = -EINTR; - goto done; - } + TASK_INTERRUPTIBLE)) + return -EINTR; } /* for unlimited number of responses we will use buffer with @@ -1561,13 +1548,8 @@ static int hci_dev_do_open(struct hci_dev *hdev) } else { /* Init failed, cleanup */ flush_work(&hdev->tx_work); - - /* Since hci_rx_work() is possible to awake new cmd_work - * it should be flushed first to avoid unexpected call of - * hci_cmd_work() - */ - flush_work(&hdev->rx_work); flush_work(&hdev->cmd_work); + flush_work(&hdev->rx_work); skb_queue_purge(&hdev->cmd_q); skb_queue_purge(&hdev->rx_q); @@ -1685,14 +1667,6 @@ int hci_dev_do_close(struct hci_dev *hdev) hci_req_cancel(hdev, ENODEV); hci_req_lock(hdev); - if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && - !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && - test_bit(HCI_UP, &hdev->flags)) { - /* Execute vendor specific shutdown routine */ - if (hdev->shutdown) - hdev->shutdown(hdev); - } - if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { cancel_delayed_work_sync(&hdev->cmd_timer); hci_req_unlock(hdev); @@ -3459,7 +3433,6 @@ int hci_register_dev(struct hci_dev *hdev) return id; err_wqueue: - debugfs_remove_recursive(hdev->debugfs); destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); err: @@ -3472,10 +3445,14 @@ EXPORT_SYMBOL(hci_register_dev); /* Unregister HCI device */ void hci_unregister_dev(struct hci_dev *hdev) { + int id; + BT_DBG("%pK name %s bus %d", hdev, hdev->name, hdev->bus); hci_dev_set_flag(hdev, HCI_UNREGISTER); + id = hdev->id; + write_lock(&hci_dev_list_lock); list_del(&hdev->list); write_unlock(&hci_dev_list_lock); @@ -3504,14 +3481,7 @@ void hci_unregister_dev(struct hci_dev *hdev) } device_del(&hdev->dev); - /* Actual cleanup is deferred until hci_cleanup_dev(). 
*/ - hci_dev_put(hdev); -} -EXPORT_SYMBOL(hci_unregister_dev); -/* Cleanup HCI device */ -void hci_cleanup_dev(struct hci_dev *hdev) -{ debugfs_remove_recursive(hdev->debugfs); destroy_workqueue(hdev->workqueue); @@ -3531,8 +3501,11 @@ void hci_cleanup_dev(struct hci_dev *hdev) hci_discovery_filter_clear(hdev); hci_dev_unlock(hdev); - ida_simple_remove(&hci_index_ida, hdev->id); + hci_dev_put(hdev); + + ida_simple_remove(&hci_index_ida, id); } +EXPORT_SYMBOL(hci_unregister_dev); /* Suspend HCI device */ int hci_suspend_dev(struct hci_dev *hdev) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 60cddf0b36e6..19df2343734f 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -3747,21 +3747,6 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, switch (ev->status) { case 0x00: - /* The synchronous connection complete event should only be - * sent once per new connection. Receiving a successful - * complete event when the connection status is already - * BT_CONNECTED means that the device is misbehaving and sent - * multiple complete event packets for the same new connection. - * - * Registering the device more than once can corrupt kernel - * memory, hence upon detecting this invalid event, we report - * an error and ignore the packet. 
- */ - if (conn->state == BT_CONNECTED) { - bt_dev_err(hdev, "Ignoring connect complete event for existing connection"); - goto unlock; - } - conn->handle = __le16_to_cpu(ev->handle); conn->state = BT_CONNECTED; conn->type = ev->link_type; @@ -4351,11 +4336,6 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev, return; } - if (!hcon->amp_mgr) { - hci_dev_unlock(hdev); - return; - } - if (ev->status) { hci_conn_del(hcon); hci_dev_unlock(hdev); @@ -4400,7 +4380,6 @@ static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) return; hchan->handle = le16_to_cpu(ev->handle); - hchan->amp = true; BT_DBG("hcon %pK mgr %pK hchan %pK", hcon, hcon->amp_mgr, hchan); @@ -4433,7 +4412,7 @@ static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, hci_dev_lock(hdev); hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); - if (!hchan || !hchan->amp) + if (!hchan) goto unlock; amp_destroy_logical_link(hchan, ev->reason); @@ -4940,13 +4919,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) struct hci_ev_le_advertising_info *ev = ptr; s8 rssi; - if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) { - bt_dev_err(hdev, "Malicious advertising data."); - break; - } - - if (ev->length <= HCI_MAX_AD_LENGTH && - ev->data + ev->length <= skb_tail_pointer(skb)) { + if (ev->length <= HCI_MAX_AD_LENGTH) { rssi = ev->data[ev->length]; process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, rssi, @@ -5141,18 +5114,20 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) { u8 num_reports = skb->data[0]; - struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1]; - - if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1) - return; + void *ptr = &skb->data[1]; hci_dev_lock(hdev); - for (; num_reports; num_reports--, ev++) + while (num_reports--) { + struct hci_ev_le_direct_adv_info *ev = ptr; + process_adv_report(hdev, ev->evt_type, &ev->bdaddr, 
ev->bdaddr_type, &ev->direct_addr, ev->direct_addr_type, ev->rssi, NULL, 0); + ptr += sizeof(*ev); + } + hci_dev_unlock(hdev); } diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index f84cfd0d4c65..4f6f5d89278b 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -53,17 +53,6 @@ struct hci_pinfo { unsigned long flags; }; -static struct hci_dev *hci_hdev_from_sock(struct sock *sk) -{ - struct hci_dev *hdev = hci_pi(sk)->hdev; - - if (!hdev) - return ERR_PTR(-EBADFD); - if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) - return ERR_PTR(-EPIPE); - return hdev; -} - void hci_sock_set_flag(struct sock *sk, int nr) { set_bit(nr, &hci_pi(sk)->flags); @@ -491,13 +480,19 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event) if (event == HCI_DEV_UNREG) { struct sock *sk; - /* Wake up sockets using this dead device */ + /* Detach sockets from device */ read_lock(&hci_sk_list.lock); sk_for_each(sk, &hci_sk_list.head) { + bh_lock_sock_nested(sk); if (hci_pi(sk)->hdev == hdev) { + hci_pi(sk)->hdev = NULL; sk->sk_err = EPIPE; + sk->sk_state = BT_OPEN; sk->sk_state_change(sk); + + hci_dev_put(hdev); } + bh_unlock_sock(sk); } read_unlock(&hci_sk_list.lock); } @@ -636,10 +631,10 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg) static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) { - struct hci_dev *hdev = hci_hdev_from_sock(sk); + struct hci_dev *hdev = hci_pi(sk)->hdev; - if (IS_ERR(hdev)) - return PTR_ERR(hdev); + if (!hdev) + return -EBADFD; if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return -EBUSY; @@ -771,18 +766,6 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, lock_sock(sk); - /* Allow detaching from dead device and attaching to alive device, if - * the caller wants to re-bind (instead of close) this socket in - * response to hci_sock_dev_event(HCI_DEV_UNREG) notification. 
- */ - hdev = hci_pi(sk)->hdev; - if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) { - hci_pi(sk)->hdev = NULL; - sk->sk_state = BT_OPEN; - hci_dev_put(hdev); - } - hdev = NULL; - if (sk->sk_state == BT_BOUND) { err = -EALREADY; goto done; @@ -954,9 +937,9 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, lock_sock(sk); - hdev = hci_hdev_from_sock(sk); - if (IS_ERR(hdev)) { - err = PTR_ERR(hdev); + hdev = hci_pi(sk)->hdev; + if (!hdev) { + err = -EBADFD; goto done; } @@ -1208,9 +1191,9 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, goto done; } - hdev = hci_hdev_from_sock(sk); - if (IS_ERR(hdev)) { - err = PTR_ERR(hdev); + hdev = hci_pi(sk)->hdev; + if (!hdev) { + err = -EBADFD; goto done; } diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index a76b1371a7fc..4f78b28686ff 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -180,9 +180,6 @@ ATTRIBUTE_GROUPS(bt_host); static void bt_host_release(struct device *dev) { struct hci_dev *hdev = to_hci_dev(dev); - - if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) - hci_cleanup_dev(hdev); kfree(hdev); module_put(THIS_MODULE); } diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 8e47a5392129..e614940c4e98 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -735,7 +735,7 @@ static void hidp_stop(struct hid_device *hid) hid->claimed = 0; } -struct hid_ll_driver hidp_hid_driver = { +static struct hid_ll_driver hidp_hid_driver = { .parse = hidp_parse, .start = hidp_start, .stop = hidp_stop, @@ -744,7 +744,6 @@ struct hid_ll_driver hidp_hid_driver = { .raw_request = hidp_raw_request, .output_report = hidp_output_report, }; -EXPORT_SYMBOL_GPL(hidp_hid_driver); /* This function sets up the hid device. It does not add it to the HID system. That is done in hidp_add_connection(). 
*/ @@ -1284,7 +1283,7 @@ static int hidp_session_thread(void *arg) /* cleanup runtime environment */ remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); - remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); + remove_wait_queue(sk_sleep(session->intr_sock->sk), &ctrl_wait); wake_up_interruptible(&session->report_queue); hidp_del_timer(session); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 820c78945e16..27bf8e591e28 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -434,8 +434,6 @@ struct l2cap_chan *l2cap_chan_create(void) if (!chan) return NULL; - skb_queue_head_init(&chan->tx_q); - skb_queue_head_init(&chan->srej_q); mutex_init(&chan->lock); /* Set default lock nesting level */ @@ -501,9 +499,7 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan) chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; - chan->conf_state = 0; - set_bit(CONF_NOT_COMPLETE, &chan->conf_state); set_bit(FLAG_FORCE_ACTIVE, &chan->flags); } diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 6c127f4ac3a2..493d84bfdfa6 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1309,9 +1309,6 @@ static void l2cap_sock_close_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; - if (!sk) - return; - l2cap_sock_kill(sk); } @@ -1320,9 +1317,6 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err) struct sock *sk = chan->data; struct sock *parent; - if (!sk) - return; - BT_DBG("chan %pK state %s", chan, state_to_string(chan->state)); /* This callback can be called both for server (BT_LISTEN) @@ -1492,10 +1486,8 @@ static void l2cap_sock_destruct(struct sock *sk) { BT_DBG("sk %pK", sk); - if (l2cap_pi(sk)->chan) { - l2cap_pi(sk)->chan->data = NULL; + if (l2cap_pi(sk)->chan) l2cap_chan_put(l2cap_pi(sk)->chan); - } if (l2cap_pi(sk)->rx_busy_skb) { 
kfree_skb(l2cap_pi(sk)->rx_busy_skb); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 9ffabc81057c..9f2f850f5841 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -212,15 +212,12 @@ static u8 mgmt_status_table[] = { MGMT_STATUS_TIMEOUT, /* Instant Passed */ MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */ MGMT_STATUS_FAILED, /* Transaction Collision */ - MGMT_STATUS_FAILED, /* Reserved for future use */ MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */ MGMT_STATUS_REJECTED, /* QoS Rejected */ MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */ MGMT_STATUS_REJECTED, /* Insufficient Security */ MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */ - MGMT_STATUS_FAILED, /* Reserved for future use */ MGMT_STATUS_BUSY, /* Role Switch Pending */ - MGMT_STATUS_FAILED, /* Reserved for future use */ MGMT_STATUS_FAILED, /* Slot Violation */ MGMT_STATUS_FAILED, /* Role Switch Failed */ MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */ @@ -2285,6 +2282,10 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data, BT_DBG("request for %s", hdev->name); + if (!IS_ENABLED(CONFIG_BT_HS)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + MGMT_STATUS_NOT_SUPPORTED); + status = mgmt_bredr_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, @@ -2434,10 +2435,6 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) BT_DBG("request for %s", hdev->name); - if (!IS_ENABLED(CONFIG_BT_HS)) - return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, - MGMT_STATUS_NOT_SUPPORTED); - status = mgmt_bredr_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 4c20ceaf3089..46c3f086a261 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -83,6 +83,7 @@ static void sco_sock_timeout(unsigned long arg) sk->sk_state_change(sk); bh_unlock_sock(sk); + 
sco_sock_kill(sk); sock_put(sk); } @@ -174,6 +175,7 @@ static void sco_conn_del(struct hci_conn *hcon, int err) sco_sock_clear_timer(sk); sco_chan_del(sk, err); bh_unlock_sock(sk); + sco_sock_kill(sk); sock_put(sk); } @@ -269,8 +271,7 @@ done: return err; } -static int sco_send_frame(struct sock *sk, void *buf, int len, - unsigned int msg_flags) +static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) { struct sco_conn *conn = sco_pi(sk)->conn; struct sk_buff *skb; @@ -282,11 +283,15 @@ static int sco_send_frame(struct sock *sk, void *buf, int len, BT_DBG("sk %pK len %d", sk, len); - skb = bt_skb_send_alloc(sk, len, msg_flags & MSG_DONTWAIT, &err); + skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) return err; - memcpy(skb_put(skb, len), buf, len); + if (memcpy_from_msg(skb_put(skb, len), msg, len)) { + kfree_skb(skb); + return -EFAULT; + } + hci_send_sco(conn->hcon, skb); return len; @@ -387,7 +392,8 @@ static void sco_sock_cleanup_listen(struct sock *parent) */ static void sco_sock_kill(struct sock *sk) { - if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket || + sock_flag(sk, SOCK_DEAD)) return; BT_DBG("sk %pK state %d", sk, sk->sk_state); @@ -439,6 +445,7 @@ static void sco_sock_close(struct sock *sk) lock_sock(sk); __sco_sock_close(sk); release_sock(sk); + sco_sock_kill(sk); } static void sco_sock_init(struct sock *sk, struct sock *parent) @@ -697,7 +704,6 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; - void *buf; int err; BT_DBG("sock %pK, sk %pK", sock, sk); @@ -709,24 +715,14 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; - buf = kmalloc(len, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - if (memcpy_from_msg(buf, msg, len)) { - kfree(buf); - return -EFAULT; - } - lock_sock(sk); if (sk->sk_state == BT_CONNECTED) - err = 
sco_send_frame(sk, buf, len, msg->msg_flags); + err = sco_send_frame(sk, msg, len); else err = -ENOTCONN; release_sock(sk); - kfree(buf); return err; } @@ -767,11 +763,6 @@ static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting) cp.max_latency = cpu_to_le16(0xffff); cp.retrans_effort = 0xff; break; - default: - /* use CVSD settings as fallback */ - cp.max_latency = cpu_to_le16(0xffff); - cp.retrans_effort = 0xff; - break; } hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 1d9c517dd3d3..d3114dfd8adf 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -2654,15 +2654,6 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) if (skb->len < sizeof(*key)) return SMP_INVALID_PARAMS; - /* Check if remote and local public keys are the same and debug key is - * not in use. - */ - if (!test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags) && - !crypto_memneq(key, smp->local_pk, 64)) { - bt_dev_err(hdev, "Remote and local public keys are identical"); - return SMP_UNSPECIFIED; - } - memcpy(smp->remote_pk, key, 64); if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags)) { diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index f580dbaac5a9..6c84c9b8d60b 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -711,17 +711,9 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff mtu_reserved = nf_bridge_mtu_reduction(skb); mtu = skb->dev->mtu; - if (nf_bridge->pkt_otherhost) { - skb->pkt_type = PACKET_OTHERHOST; - nf_bridge->pkt_otherhost = false; - } - if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu) mtu = nf_bridge->frag_max_size; - nf_bridge_update_protocol(skb); - nf_bridge_push_encap_header(skb); - if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) { nf_bridge_info_free(skb); return br_dev_queue_push_xmit(net, sk, skb); @@ -739,6 +731,8 @@ static int br_nf_dev_queue_xmit(struct net 
*net, struct sock *sk, struct sk_buff IPCB(skb)->frag_max_size = nf_bridge->frag_max_size; + nf_bridge_update_protocol(skb); + data = this_cpu_ptr(&brnf_frag_data_storage); data->vlan_tci = skb->vlan_tci; @@ -761,6 +755,8 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size; + nf_bridge_update_protocol(skb); + data = this_cpu_ptr(&brnf_frag_data_storage); data->encap_size = nf_bridge_encap_header_len(skb); data->size = ETH_HLEN + data->encap_size; @@ -808,6 +804,8 @@ static unsigned int br_nf_post_routing(void *priv, else return NF_ACCEPT; + /* We assume any code from br_dev_queue_push_xmit onwards doesn't care + * about the value of skb->pkt_type. */ if (skb->pkt_type == PACKET_OTHERHOST) { skb->pkt_type = PACKET_HOST; nf_bridge->pkt_otherhost = true; diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 1b848a45047b..a7953962112a 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -225,10 +225,8 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags) } masterv = br_vlan_get_master(br, v->vid); - if (!masterv) { - err = -ENOMEM; + if (!masterv) goto out_filt; - } v->brvlan = masterv; } diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c index 61a9f1be1263..517e78befcb2 100644 --- a/net/bridge/netfilter/ebt_limit.c +++ b/net/bridge/netfilter/ebt_limit.c @@ -105,7 +105,6 @@ static struct xt_match ebt_limit_mt_reg __read_mostly = { .match = ebt_limit_mt, .checkentry = ebt_limit_mt_check, .matchsize = sizeof(struct ebt_limit_info), - .usersize = offsetof(struct ebt_limit_info, prev), #ifdef CONFIG_COMPAT .compatsize = sizeof(struct ebt_compat_limit_info), #endif diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index a28ffbbf7450..a0443d40d677 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -303,7 +303,7 @@ static void dev_flowctrl(struct net_device *dev, int on) caifd_put(caifd); } -int caif_enroll_dev(struct 
net_device *dev, struct caif_dev_common *caifdev, +void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, struct cflayer *link_support, int head_room, struct cflayer **layer, int (**rcv_func)(struct sk_buff *, struct net_device *, @@ -314,12 +314,11 @@ int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, enum cfcnfg_phy_preference pref; struct cfcnfg *cfg = get_cfcnfg(dev_net(dev)); struct caif_device_entry_list *caifdevs; - int res; caifdevs = caif_device_list(dev_net(dev)); caifd = caif_device_alloc(dev); if (!caifd) - return -ENOMEM; + return; *layer = &caifd->layer; spin_lock_init(&caifd->flow_lock); @@ -341,7 +340,7 @@ int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, sizeof(caifd->layer.name) - 1); caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0; caifd->layer.transmit = transmit; - res = cfcnfg_add_phy_layer(cfg, + cfcnfg_add_phy_layer(cfg, dev, &caifd->layer, pref, @@ -351,7 +350,6 @@ int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, mutex_unlock(&caifdevs->lock); if (rcv_func) *rcv_func = receive; - return res; } EXPORT_SYMBOL(caif_enroll_dev); @@ -366,7 +364,6 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, struct cflayer *layer, *link_support; int head_room = 0; struct caif_device_entry_list *caifdevs; - int res; cfg = get_cfcnfg(dev_net(dev)); caifdevs = caif_device_list(dev_net(dev)); @@ -392,10 +389,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, break; } } - res = caif_enroll_dev(dev, caifdev, link_support, head_room, + caif_enroll_dev(dev, caifdev, link_support, head_room, &layer, NULL); - if (res) - cfserl_release(link_support); caifdev->flowctrl = dev_flowctrl; break; diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 3cfd413aa2c8..aa209b1066c9 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -539,8 +539,7 @@ static int 
caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg, goto err; ret = -EINVAL; - if (unlikely(msg->msg_iter.nr_segs == 0) || - unlikely(msg->msg_iter.iov->iov_base == NULL)) + if (unlikely(msg->msg_iter.iov->iov_base == NULL)) goto err; noblock = msg->msg_flags & MSG_DONTWAIT; diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c index 485dde566c1a..5cd44f001f64 100644 --- a/net/caif/caif_usb.c +++ b/net/caif/caif_usb.c @@ -116,11 +116,6 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN], return (struct cflayer *) this; } -static void cfusbl_release(struct cflayer *layer) -{ - kfree(layer); -} - static struct packet_type caif_usb_type __read_mostly = { .type = cpu_to_be16(ETH_P_802_EX1), }; @@ -133,7 +128,6 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, struct cflayer *layer, *link_support; struct usbnet *usbnet; struct usb_device *usbdev; - int res; /* Check whether we have a NCM device, and find its VID/PID. */ if (!(dev->dev.parent && dev->dev.parent->driver && @@ -176,11 +170,8 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, if (dev->num_tx_queues > 1) pr_warn("USB device uses more than one tx queue\n"); - res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN, + caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN, &layer, &caif_usb_type.func); - if (res) - goto err; - if (!pack_added) dev_add_pack(&caif_usb_type); pack_added = true; @@ -190,9 +181,6 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, layer->name[sizeof(layer->name) - 1] = 0; return 0; -err: - cfusbl_release(link_support); - return res; } static struct notifier_block caif_device_notifier = { diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c index c45b531a6cd5..fa39fc298708 100644 --- a/net/caif/cfcnfg.c +++ b/net/caif/cfcnfg.c @@ -455,7 +455,7 @@ unlock: rcu_read_unlock(); } -int +void cfcnfg_add_phy_layer(struct cfcnfg *cnfg, struct net_device 
*dev, struct cflayer *phy_layer, enum cfcnfg_phy_preference pref, @@ -464,7 +464,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, { struct cflayer *frml; struct cfcnfg_phyinfo *phyinfo = NULL; - int i, res = 0; + int i; u8 phyid; mutex_lock(&cnfg->lock); @@ -478,15 +478,12 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, goto got_phyid; } pr_warn("Too many CAIF Link Layers (max 6)\n"); - res = -EEXIST; goto out; got_phyid: phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC); - if (!phyinfo) { - res = -ENOMEM; + if (!phyinfo) goto out_err; - } phy_layer->id = phyid; phyinfo->pref = pref; @@ -500,10 +497,8 @@ got_phyid: frml = cffrml_create(phyid, fcs); - if (!frml) { - res = -ENOMEM; + if (!frml) goto out_err; - } phyinfo->frm_layer = frml; layer_set_up(frml, cnfg->mux); @@ -521,12 +516,11 @@ got_phyid: list_add_rcu(&phyinfo->node, &cnfg->phys); out: mutex_unlock(&cnfg->lock); - return res; + return; out_err: kfree(phyinfo); mutex_unlock(&cnfg->lock); - return res; } EXPORT_SYMBOL(cfcnfg_add_phy_layer); diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c index af1e1e36dc90..ce60f06d76de 100644 --- a/net/caif/cfserl.c +++ b/net/caif/cfserl.c @@ -31,11 +31,6 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt); static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, int phyid); -void cfserl_release(struct cflayer *layer) -{ - kfree(layer); -} - struct cflayer *cfserl_create(int instance, bool use_stx) { struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC); diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 40f032f62029..67a4a36febd1 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -56,6 +56,20 @@ struct chnl_net { enum caif_states state; }; +static void robust_list_del(struct list_head *delete_node) +{ + struct list_head *list_node; + struct list_head *n; + ASSERT_RTNL(); + list_for_each_safe(list_node, n, &chnl_net_list) { + if (list_node == delete_node) { + list_del(list_node); + return; + } + } 
+ WARN_ON(1); +} + static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) { struct sk_buff *skb; @@ -357,7 +371,6 @@ static int chnl_net_init(struct net_device *dev) ASSERT_RTNL(); priv = netdev_priv(dev); strncpy(priv->name, dev->name, sizeof(priv->name)); - INIT_LIST_HEAD(&priv->list_field); return 0; } @@ -366,7 +379,7 @@ static void chnl_net_uninit(struct net_device *dev) struct chnl_net *priv; ASSERT_RTNL(); priv = netdev_priv(dev); - list_del_init(&priv->list_field); + robust_list_del(&priv->list_field); } static const struct net_device_ops netdev_ops = { @@ -529,7 +542,7 @@ static void __exit chnl_exit_module(void) rtnl_lock(); list_for_each_safe(list_node, _tmp, &chnl_net_list) { dev = list_entry(list_node, struct chnl_net, list_field); - list_del_init(list_node); + list_del(list_node); delete_device(dev); } rtnl_unlock(); diff --git a/net/can/bcm.c b/net/can/bcm.c index 549ee0de456f..1f15622d3c65 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -121,7 +121,7 @@ struct bcm_sock { struct sock sk; int bound; int ifindex; - struct list_head notifier; + struct notifier_block notifier; struct list_head rx_ops; struct list_head tx_ops; unsigned long dropped_usr_msgs; @@ -129,10 +129,6 @@ struct bcm_sock { char procname [32]; /* inode number in decimal with \0 */ }; -static LIST_HEAD(bcm_notifier_list); -static DEFINE_SPINLOCK(bcm_notifier_lock); -static struct bcm_sock *bcm_busy_notifier; - static inline struct bcm_sock *bcm_sk(const struct sock *sk) { return (struct bcm_sock *)sk; @@ -396,7 +392,6 @@ static void bcm_tx_timeout_tsklet(unsigned long data) if (!op->count && (op->flags & TX_COUNTEVT)) { /* create notification to user */ - memset(&msg_head, 0, sizeof(msg_head)); msg_head.opcode = TX_EXPIRED; msg_head.flags = op->flags; msg_head.count = op->count; @@ -444,7 +439,6 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data) /* this element is not throttled anymore */ data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV); - memset(&head, 
0, sizeof(head)); head.opcode = RX_CHANGED; head.flags = op->flags; head.count = op->count; @@ -556,7 +550,6 @@ static void bcm_rx_timeout_tsklet(unsigned long data) struct bcm_msg_head msg_head; /* create notification to user */ - memset(&msg_head, 0, sizeof(msg_head)); msg_head.opcode = RX_TIMEOUT; msg_head.flags = op->flags; msg_head.count = op->count; @@ -737,21 +730,21 @@ static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id, static void bcm_remove_op(struct bcm_op *op) { if (op->tsklet.func) { - do { - tasklet_kill(&op->tsklet); + while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) || + test_bit(TASKLET_STATE_RUN, &op->tsklet.state) || + hrtimer_active(&op->timer)) { hrtimer_cancel(&op->timer); - } while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) || - test_bit(TASKLET_STATE_RUN, &op->tsklet.state) || - hrtimer_active(&op->timer)); + tasklet_kill(&op->tsklet); + } } if (op->thrtsklet.func) { - do { - tasklet_kill(&op->thrtsklet); + while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) || + test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) || + hrtimer_active(&op->thrtimer)) { hrtimer_cancel(&op->thrtimer); - } while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) || - test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) || - hrtimer_active(&op->thrtimer)); + tasklet_kill(&op->thrtsklet); + } } if ((op->frames) && (op->frames != &op->sframe)) @@ -813,7 +806,6 @@ static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex) bcm_rx_handler, op); list_del(&op->list); - synchronize_rcu(); bcm_remove_op(op); return 1; /* done */ } @@ -1393,15 +1385,20 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) /* * notification handler for netdevice status changes */ -static void bcm_notify(struct bcm_sock *bo, unsigned long msg, - struct net_device *dev) +static int bcm_notifier(struct notifier_block *nb, unsigned long msg, + void *ptr) { + struct net_device *dev = 
netdev_notifier_info_to_dev(ptr); + struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier); struct sock *sk = &bo->sk; struct bcm_op *op; int notify_enodev = 0; if (!net_eq(dev_net(dev), &init_net)) - return; + return NOTIFY_DONE; + + if (dev->type != ARPHRD_CAN) + return NOTIFY_DONE; switch (msg) { @@ -1436,28 +1433,7 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg, sk->sk_error_report(sk); } } -} -static int bcm_notifier(struct notifier_block *nb, unsigned long msg, - void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - - if (dev->type != ARPHRD_CAN) - return NOTIFY_DONE; - if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN) - return NOTIFY_DONE; - if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */ - return NOTIFY_DONE; - - spin_lock(&bcm_notifier_lock); - list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) { - spin_unlock(&bcm_notifier_lock); - bcm_notify(bcm_busy_notifier, msg, dev); - spin_lock(&bcm_notifier_lock); - } - bcm_busy_notifier = NULL; - spin_unlock(&bcm_notifier_lock); return NOTIFY_DONE; } @@ -1477,9 +1453,9 @@ static int bcm_init(struct sock *sk) INIT_LIST_HEAD(&bo->rx_ops); /* set notifier */ - spin_lock(&bcm_notifier_lock); - list_add_tail(&bo->notifier, &bcm_notifier_list); - spin_unlock(&bcm_notifier_lock); + bo->notifier.notifier_call = bcm_notifier; + + register_netdevice_notifier(&bo->notifier); return 0; } @@ -1500,14 +1476,7 @@ static int bcm_release(struct socket *sock) /* remove bcm_ops, timer, rx_unregister(), etc. 
*/ - spin_lock(&bcm_notifier_lock); - while (bcm_busy_notifier == bo) { - spin_unlock(&bcm_notifier_lock); - schedule_timeout_uninterruptible(1); - spin_lock(&bcm_notifier_lock); - } - list_del(&bo->notifier); - spin_unlock(&bcm_notifier_lock); + unregister_netdevice_notifier(&bo->notifier); lock_sock(sk); @@ -1539,12 +1508,8 @@ static int bcm_release(struct socket *sock) REGMASK(op->can_id), bcm_rx_handler, op); - } - - synchronize_rcu(); - - list_for_each_entry_safe(op, next, &bo->rx_ops, list) bcm_remove_op(op); + } /* remove procfs entry */ if (proc_dir && bo->bcm_proc_read) @@ -1697,10 +1662,6 @@ static const struct can_proto bcm_can_proto = { .prot = &bcm_proto, }; -static struct notifier_block canbcm_notifier = { - .notifier_call = bcm_notifier -}; - static int __init bcm_module_init(void) { int err; @@ -1715,8 +1676,6 @@ static int __init bcm_module_init(void) /* create /proc/net/can-bcm directory */ proc_dir = proc_mkdir("can-bcm", init_net.proc_net); - register_netdevice_notifier(&canbcm_notifier); - return 0; } @@ -1726,8 +1685,6 @@ static void __exit bcm_module_exit(void) if (proc_dir) remove_proc_entry("can-bcm", init_net.proc_net); - - unregister_netdevice_notifier(&canbcm_notifier); } module_init(bcm_module_init); diff --git a/net/can/gw.c b/net/can/gw.c index 1867000f8a65..81650affa3fa 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -497,7 +497,6 @@ static int cgw_notifier(struct notifier_block *nb, if (gwj->src.dev == dev || gwj->dst.dev == dev) { hlist_del(&gwj->list); cgw_unregister_filter(gwj); - synchronize_rcu(); kmem_cache_free(cgw_cache, gwj); } } @@ -942,7 +941,6 @@ static void cgw_remove_all_jobs(void) hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { hlist_del(&gwj->list); cgw_unregister_filter(gwj); - synchronize_rcu(); kmem_cache_free(cgw_cache, gwj); } } @@ -1010,7 +1008,6 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh) hlist_del(&gwj->list); cgw_unregister_filter(gwj); - synchronize_rcu(); 
kmem_cache_free(cgw_cache, gwj); err = 0; break; diff --git a/net/can/raw.c b/net/can/raw.c index 1c2bf97ca168..e9403a26a1d5 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -84,7 +84,7 @@ struct raw_sock { struct sock sk; int bound; int ifindex; - struct list_head notifier; + struct notifier_block notifier; int loopback; int recv_own_msgs; int fd_frames; @@ -96,10 +96,6 @@ struct raw_sock { struct uniqframe __percpu *uniq; }; -static LIST_HEAD(raw_notifier_list); -static DEFINE_SPINLOCK(raw_notifier_lock); -static struct raw_sock *raw_busy_notifier; - /* * Return pointer to store the extra msg flags for raw_recvmsg(). * We use the space of one unsigned int beyond the 'struct sockaddr_can' @@ -264,16 +260,21 @@ static int raw_enable_allfilters(struct net_device *dev, struct sock *sk) return err; } -static void raw_notify(struct raw_sock *ro, unsigned long msg, - struct net_device *dev) +static int raw_notifier(struct notifier_block *nb, + unsigned long msg, void *ptr) { + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct raw_sock *ro = container_of(nb, struct raw_sock, notifier); struct sock *sk = &ro->sk; if (!net_eq(dev_net(dev), &init_net)) - return; + return NOTIFY_DONE; + + if (dev->type != ARPHRD_CAN) + return NOTIFY_DONE; if (ro->ifindex != dev->ifindex) - return; + return NOTIFY_DONE; switch (msg) { @@ -302,28 +303,7 @@ static void raw_notify(struct raw_sock *ro, unsigned long msg, sk->sk_error_report(sk); break; } -} - -static int raw_notifier(struct notifier_block *nb, unsigned long msg, - void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - - if (dev->type != ARPHRD_CAN) - return NOTIFY_DONE; - if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN) - return NOTIFY_DONE; - if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. 
*/ - return NOTIFY_DONE; - spin_lock(&raw_notifier_lock); - list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) { - spin_unlock(&raw_notifier_lock); - raw_notify(raw_busy_notifier, msg, dev); - spin_lock(&raw_notifier_lock); - } - raw_busy_notifier = NULL; - spin_unlock(&raw_notifier_lock); return NOTIFY_DONE; } @@ -352,9 +332,9 @@ static int raw_init(struct sock *sk) return -ENOMEM; /* set notifier */ - spin_lock(&raw_notifier_lock); - list_add_tail(&ro->notifier, &raw_notifier_list); - spin_unlock(&raw_notifier_lock); + ro->notifier.notifier_call = raw_notifier; + + register_netdevice_notifier(&ro->notifier); return 0; } @@ -369,14 +349,7 @@ static int raw_release(struct socket *sock) ro = raw_sk(sk); - spin_lock(&raw_notifier_lock); - while (raw_busy_notifier == ro) { - spin_unlock(&raw_notifier_lock); - schedule_timeout_uninterruptible(1); - spin_lock(&raw_notifier_lock); - } - list_del(&ro->notifier); - spin_unlock(&raw_notifier_lock); + unregister_netdevice_notifier(&ro->notifier); lock_sock(sk); @@ -541,18 +514,10 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, return -EFAULT; } - rtnl_lock(); lock_sock(sk); - if (ro->bound && ro->ifindex) { + if (ro->bound && ro->ifindex) dev = dev_get_by_index(&init_net, ro->ifindex); - if (!dev) { - if (count > 1) - kfree(filter); - err = -ENODEV; - goto out_fil; - } - } if (ro->bound) { /* (try to) register the new filters */ @@ -589,7 +554,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, dev_put(dev); release_sock(sk); - rtnl_unlock(); break; @@ -602,16 +566,10 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, err_mask &= CAN_ERR_MASK; - rtnl_lock(); lock_sock(sk); - if (ro->bound && ro->ifindex) { + if (ro->bound && ro->ifindex) dev = dev_get_by_index(&init_net, ro->ifindex); - if (!dev) { - err = -ENODEV; - goto out_err; - } - } /* remove current error mask */ if (ro->bound) { @@ -633,7 +591,6 @@ static int 
raw_setsockopt(struct socket *sock, int level, int optname, dev_put(dev); release_sock(sk); - rtnl_unlock(); break; @@ -900,10 +857,6 @@ static const struct can_proto raw_can_proto = { .prot = &raw_proto, }; -static struct notifier_block canraw_notifier = { - .notifier_call = raw_notifier -}; - static __init int raw_module_init(void) { int err; @@ -913,8 +866,6 @@ static __init int raw_module_init(void) err = can_proto_register(&raw_can_proto); if (err < 0) printk(KERN_ERR "can: registration of raw protocol failed\n"); - else - register_netdevice_notifier(&canraw_notifier); return err; } @@ -922,7 +873,6 @@ static __init int raw_module_init(void) static __exit void raw_module_exit(void) { can_proto_unregister(&raw_can_proto); - unregister_netdevice_notifier(&canraw_notifier); } module_init(raw_module_init); diff --git a/net/compat.c b/net/compat.c index 14459a87fdbc..20c5e5f215f2 100644 --- a/net/compat.c +++ b/net/compat.c @@ -159,7 +159,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk, if (kcmlen > stackbuf_size) kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL); if (kcmsg == NULL) - return -ENOMEM; + return -ENOBUFS; /* Now copy them over neatly. */ memset(kcmsg, 0, kcmlen); diff --git a/net/core/dev.c b/net/core/dev.c index c205193cab2f..1dc4781807ce 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4744,18 +4744,11 @@ EXPORT_SYMBOL(__napi_schedule); * __napi_schedule_irqoff - schedule for receive * @n: entry to schedule * - * Variant of __napi_schedule() assuming hard irqs are masked. - * - * On PREEMPT_RT enabled kernels this maps to __napi_schedule() - * because the interrupt disabled assumption might not be true - * due to force-threaded interrupts and spinlock substitution. 
+ * Variant of __napi_schedule() assuming hard irqs are masked */ void __napi_schedule_irqoff(struct napi_struct *n) { - if (!IS_ENABLED(CONFIG_PREEMPT_RT)) - ____napi_schedule(this_cpu_ptr(&softnet_data), n); - else - __napi_schedule(n); + ____napi_schedule(this_cpu_ptr(&softnet_data), n); } EXPORT_SYMBOL(__napi_schedule_irqoff); @@ -7823,7 +7816,7 @@ static void __net_exit default_device_exit(struct net *net) continue; /* Leave virtual devices for the generic cleanup */ - if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund) + if (dev->rtnl_link_ops) continue; /* Push remaining network devices to init_net */ diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 8d71ce5030d4..9bcc6fdade3e 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -223,17 +223,13 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi) rcu_read_lock(); list_for_each_entry_rcu(new_stat, &hw_stats_list, list) { - struct net_device *dev; - /* * only add a note to our monitor buffer if: * 1) this is the dev we received on * 2) its after the last_rx delta * 3) our rx_dropped count has gone up */ - /* Paired with WRITE_ONCE() in dropmon_net_event() */ - dev = READ_ONCE(new_stat->dev); - if ((dev == napi->dev) && + if ((new_stat->dev == napi->dev) && (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) && (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) { trace_drop_common(NULL, NULL); @@ -348,10 +344,7 @@ static int dropmon_net_event(struct notifier_block *ev_block, mutex_lock(&trace_state_mutex); list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) { if (new_stat->dev == dev) { - - /* Paired with READ_ONCE() in trace_napi_poll_hit() */ - WRITE_ONCE(new_stat->dev, NULL); - + new_stat->dev = NULL; if (trace_state == TRACE_OFF) { list_del_rcu(&new_stat->list); kfree_rcu(new_stat, rcu); diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index dcd40a44b93d..ae577e820c39 100644 --- a/net/core/fib_rules.c +++ 
b/net/core/fib_rules.c @@ -765,7 +765,7 @@ static void notify_rule_change(int event, struct fib_rule *rule, { struct net *net; struct sk_buff *skb; - int err = -ENOMEM; + int err = -ENOBUFS; net = ops->fro_net; skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 31393e30b6b4..7044c2636985 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -597,7 +597,7 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, ASSERT_RTNL(); - n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL); + n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL); if (!n) goto out; @@ -1234,7 +1234,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, * we can reinject the packet there. */ n2 = NULL; - if (dst && dst->obsolete != DST_OBSOLETE_DEAD) { + if (dst) { n2 = dst_neigh_lookup_skb(dst, skb); if (n2) n1 = n2; diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c index d6161fba15c3..77969b71a50a 100644 --- a/net/core/net-procfs.c +++ b/net/core/net-procfs.c @@ -208,23 +208,12 @@ static const struct file_operations softnet_seq_fops = { .release = seq_release, }; -static void *ptype_get_idx(struct seq_file *seq, loff_t pos) +static void *ptype_get_idx(loff_t pos) { - struct list_head *ptype_list = NULL; struct packet_type *pt = NULL; - struct net_device *dev; loff_t i = 0; int t; - for_each_netdev_rcu(seq_file_net(seq), dev) { - ptype_list = &dev->ptype_all; - list_for_each_entry_rcu(pt, ptype_list, list) { - if (i == pos) - return pt; - ++i; - } - } - list_for_each_entry_rcu(pt, &ptype_all, list) { if (i == pos) return pt; @@ -245,40 +234,22 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); - return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; + return *pos ? 
ptype_get_idx(*pos - 1) : SEQ_START_TOKEN; } static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - struct net_device *dev; struct packet_type *pt; struct list_head *nxt; int hash; ++*pos; if (v == SEQ_START_TOKEN) - return ptype_get_idx(seq, 0); + return ptype_get_idx(0); pt = v; nxt = pt->list.next; - if (pt->dev) { - if (nxt != &pt->dev->ptype_all) - goto found; - - dev = pt->dev; - for_each_netdev_continue_rcu(seq_file_net(seq), dev) { - if (!list_empty(&dev->ptype_all)) { - nxt = dev->ptype_all.next; - goto found; - } - } - - nxt = ptype_all.next; - goto ptype_all; - } - if (pt->type == htons(ETH_P_ALL)) { -ptype_all: if (nxt != &ptype_all) goto found; hash = 0; @@ -307,8 +278,7 @@ static int ptype_seq_show(struct seq_file *seq, void *v) if (v == SEQ_START_TOKEN) seq_puts(seq, "Type Device Function\n"); - else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) && - (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) { + else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { if (pt->type == htons(ETH_P_ALL)) seq_puts(seq, "ALL "); else diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 441973d89068..01bfe28b20a1 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -130,10 +130,8 @@ static void ops_exit_list(const struct pernet_operations *ops, { struct net *net; if (ops->exit) { - list_for_each_entry(net, net_exit_list, exit_list) { + list_for_each_entry(net, net_exit_list, exit_list) ops->exit(net); - cond_resched(); - } } if (ops->exit_batch) ops->exit_batch(net_exit_list); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 5d0759e2102e..4ea957c1e7ee 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3519,7 +3519,7 @@ static int pktgen_thread_worker(void *arg) struct pktgen_dev *pkt_dev = NULL; int cpu = t->cpu; - WARN_ON(smp_processor_id() != cpu); + BUG_ON(smp_processor_id() != cpu); init_waitqueue_head(&t->queue); complete(&t->start_done); 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 6176e5ba5aa0..e2a0aed52983 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2198,9 +2198,9 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); const struct rtnl_link_ops *ops; - const struct rtnl_link_ops *m_ops; + const struct rtnl_link_ops *m_ops = NULL; struct net_device *dev; - struct net_device *master_dev; + struct net_device *master_dev = NULL; struct ifinfomsg *ifm; char kind[MODULE_NAME_LEN]; char ifname[IFNAMSIZ]; @@ -2231,8 +2231,6 @@ replay: dev = NULL; } - master_dev = NULL; - m_ops = NULL; if (dev) { master_dev = netdev_master_upper_dev_get(dev); if (master_dev) @@ -3242,10 +3240,6 @@ static int rtnl_bridge_notify(struct net_device *dev) if (err < 0) goto errout; - /* Notification info is only filled for bridge ports, not the bridge - * device itself. Therefore, a zero notification length is valid and - * should not result in an error. - */ if (!skb->len) goto errout; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b7734f91abd6..7b8bd8562879 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -429,11 +429,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, if (IS_ENABLED(CONFIG_FORCE_ALLOC_FROM_DMA_ZONE)) gfp_mask |= GFP_DMA; - /* If requested length is either too small or too big, - * we use kmalloc() for skb->head allocation. 
- */ - if (len <= SKB_WITH_OVERHEAD(1024) || - len > SKB_WITH_OVERHEAD(PAGE_SIZE) || + if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); if (!skb) @@ -510,17 +506,13 @@ EXPORT_SYMBOL(__netdev_alloc_skb); struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, gfp_t gfp_mask) { - struct page_frag_cache *nc; + struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache); struct sk_buff *skb; void *data; len += NET_SKB_PAD + NET_IP_ALIGN; - /* If requested length is either too small or too big, - * we use kmalloc() for skb->head allocation. - */ - if (len <= SKB_WITH_OVERHEAD(1024) || - len > SKB_WITH_OVERHEAD(PAGE_SIZE) || + if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); if (!skb) @@ -528,7 +520,6 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, goto skb_success; } - nc = this_cpu_ptr(&napi_alloc_cache); len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); len = SKB_DATA_ALIGN(len); @@ -1551,12 +1542,6 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) skb->csum = csum_block_sub(skb->csum, skb_checksum(skb, len, delta, 0), len); - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { - int hdlen = (len > skb_headlen(skb)) ? 
skb_headlen(skb) : len; - int offset = skb_checksum_start_offset(skb) + skb->csum_offset; - - if (offset + sizeof(__sum16) > hdlen) - return -EINVAL; } return __pskb_trim(skb, len); } @@ -2269,11 +2254,8 @@ skb_zerocopy_headlen(const struct sk_buff *from) if (!from->head_frag || skb_headlen(from) < L1_CACHE_BYTES || - skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { + skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) hlen = skb_headlen(from); - if (!hlen) - hlen = from->len; - } if (skb_has_frag_list(from)) hlen = from->len; @@ -2657,19 +2639,7 @@ EXPORT_SYMBOL(skb_split); */ static int skb_prepare_for_shift(struct sk_buff *skb) { - int ret = 0; - - if (skb_cloned(skb)) { - /* Save and restore truesize: pskb_expand_head() may reallocate - * memory where ksize(kmalloc(S)) != ksize(kmalloc(S)), but we - * cannot change truesize at this point. - */ - unsigned int save_truesize = skb->truesize; - - ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - skb->truesize = save_truesize; - } - return ret; + return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); } /** diff --git a/net/core/sock.c b/net/core/sock.c index ffa103957631..cdf2ca201a4f 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1014,6 +1014,7 @@ set_rcvbuf: } EXPORT_SYMBOL(sock_setsockopt); + static void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred) { @@ -1173,11 +1174,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, struct ucred peercred; if (len > sizeof(peercred)) len = sizeof(peercred); - - spin_lock(&sk->sk_peer_lock); cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); - spin_unlock(&sk->sk_peer_lock); - if (copy_to_user(optval, &peercred, len)) return -EFAULT; goto lenout; @@ -1474,10 +1471,9 @@ static void __sk_destruct(struct rcu_head *head) sk->sk_frag.page = NULL; } - /* We do not need to acquire sk->sk_peer_lock, we are the last user. 
*/ - put_cred(sk->sk_peer_cred); + if (sk->sk_peer_cred) + put_cred(sk->sk_peer_cred); put_pid(sk->sk_peer_pid); - if (likely(sk->sk_net_refcnt)) put_net(sock_net(sk)); sk_prot_free(sk->sk_prot_creator, sk); @@ -2461,8 +2457,6 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk->sk_peer_pid = NULL; sk->sk_peer_cred = NULL; - spin_lock_init(&sk->sk_peer_lock); - sk->sk_write_pending = 0; sk->sk_rcvlowat = 1; sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; diff --git a/net/core/stream.c b/net/core/stream.c index 2c50c71cb806..3089b014bb53 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -194,6 +194,9 @@ void sk_stream_kill_queues(struct sock *sk) /* First the read buffer. */ __skb_queue_purge(&sk->sk_receive_queue); + /* Next, the error queue. */ + __skb_queue_purge(&sk->sk_error_queue); + /* Next, the write queue. */ WARN_ON(!skb_queue_empty(&sk->sk_write_queue)); diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 75dd45210316..6fe2b615518c 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1725,8 +1725,6 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh) fn = &reply_funcs[dcb->cmd]; if (!fn->cb) return -EOPNOTSUPP; - if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN)) - return -EPERM; if (!tb[DCB_ATTR_IFNAME]) return -EINVAL; @@ -1937,54 +1935,10 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) } EXPORT_SYMBOL(dcb_ieee_delapp); -static void dcbnl_flush_dev(struct net_device *dev) -{ - struct dcb_app_type *itr, *tmp; - - spin_lock_bh(&dcb_lock); - - list_for_each_entry_safe(itr, tmp, &dcb_app_list, list) { - if (itr->ifindex == dev->ifindex) { - list_del(&itr->list); - kfree(itr); - } - } - - spin_unlock_bh(&dcb_lock); -} - -static int dcbnl_netdevice_event(struct notifier_block *nb, - unsigned long event, void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - - switch (event) { - case NETDEV_UNREGISTER: - if (!dev->dcbnl_ops) - return NOTIFY_DONE; - - dcbnl_flush_dev(dev); - 
- return NOTIFY_OK; - default: - return NOTIFY_DONE; - } -} - -static struct notifier_block dcbnl_nb __read_mostly = { - .notifier_call = dcbnl_netdevice_event, -}; - static int __init dcbnl_init(void) { - int err; - INIT_LIST_HEAD(&dcb_app_list); - err = register_netdevice_notifier(&dcbnl_nb); - if (err) - return err; - rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL); diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index e50fc19690c8..b0e28d24e1a7 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -44,9 +44,9 @@ extern bool dccp_debug; #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a) #define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a) #else -#define dccp_pr_debug(format, a...) do {} while (0) -#define dccp_pr_debug_cat(format, a...) do {} while (0) -#define dccp_debug(format, a...) do {} while (0) +#define dccp_pr_debug(format, a...) +#define dccp_pr_debug_cat(format, a...) +#define dccp_debug(format, a...) 
#endif extern struct inet_hashinfo dccp_hashinfo; diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index bb1a7405dc0e..736cc95b5201 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -313,11 +313,6 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) if (!ipv6_unicast_destination(skb)) return 0; /* discard, don't send a reset here */ - if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) { - IP6_INC_STATS_BH(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS); - return 0; - } - if (dccp_bad_service_code(sk, service)) { dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; goto drop; diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 1f03a590288d..68eed344b471 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -92,8 +92,6 @@ struct sock *dccp_create_openreq_child(const struct sock *sk, newdp->dccps_role = DCCP_ROLE_SERVER; newdp->dccps_hc_rx_ackvec = NULL; newdp->dccps_service_list = NULL; - newdp->dccps_hc_rx_ccid = NULL; - newdp->dccps_hc_tx_ccid = NULL; newdp->dccps_service = dreq->dreq_service; newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo; newdp->dccps_timestamp_time = dreq->dreq_timestamp_time; diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index ee297964fcd2..9d8fcdefefc0 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -823,7 +823,7 @@ static int dn_auto_bind(struct socket *sock) static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) { struct dn_scp *scp = DN_SK(sk); - DEFINE_WAIT_FUNC(wait, woken_wake_function); + DEFINE_WAIT(wait); int err; if (scp->state != DN_CR) @@ -833,11 +833,11 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk)); dn_send_conn_conf(sk, allocation); - add_wait_queue(sk_sleep(sk), &wait); + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); for(;;) { release_sock(sk); if (scp->state == DN_CC) - *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, 
*timeo); + *timeo = schedule_timeout(*timeo); lock_sock(sk); err = 0; if (scp->state == DN_RUN) @@ -851,8 +851,9 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) err = -EAGAIN; if (!*timeo) break; + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } - remove_wait_queue(sk_sleep(sk), &wait); + finish_wait(sk_sleep(sk), &wait); if (err == 0) { sk->sk_socket->state = SS_CONNECTED; } else if (scp->state != DN_CC) { @@ -864,7 +865,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) static int dn_wait_run(struct sock *sk, long *timeo) { struct dn_scp *scp = DN_SK(sk); - DEFINE_WAIT_FUNC(wait, woken_wake_function); + DEFINE_WAIT(wait); int err = 0; if (scp->state == DN_RUN) @@ -873,11 +874,11 @@ static int dn_wait_run(struct sock *sk, long *timeo) if (!*timeo) return -EALREADY; - add_wait_queue(sk_sleep(sk), &wait); + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); for(;;) { release_sock(sk); if (scp->state == DN_CI || scp->state == DN_CC) - *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); + *timeo = schedule_timeout(*timeo); lock_sock(sk); err = 0; if (scp->state == DN_RUN) @@ -891,8 +892,9 @@ static int dn_wait_run(struct sock *sk, long *timeo) err = -ETIMEDOUT; if (!*timeo) break; + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } - remove_wait_queue(sk_sleep(sk), &wait); + finish_wait(sk_sleep(sk), &wait); out: if (err == 0) { sk->sk_socket->state = SS_CONNECTED; @@ -1037,16 +1039,16 @@ static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt) static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) { - DEFINE_WAIT_FUNC(wait, woken_wake_function); + DEFINE_WAIT(wait); struct sk_buff *skb = NULL; int err = 0; - add_wait_queue(sk_sleep(sk), &wait); + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); for(;;) { release_sock(sk); skb = skb_dequeue(&sk->sk_receive_queue); if (skb == NULL) { - *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, 
*timeo); + *timeo = schedule_timeout(*timeo); skb = skb_dequeue(&sk->sk_receive_queue); } lock_sock(sk); @@ -1061,8 +1063,9 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) err = -EAGAIN; if (!*timeo) break; + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } - remove_wait_queue(sk_sleep(sk), &wait); + finish_wait(sk_sleep(sk), &wait); return skb == NULL ? ERR_PTR(err) : skb; } diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index afcde16a94e2..b3d32cb71801 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -297,8 +297,7 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb, node_dst = find_node_by_AddrA(&port->hsr->node_db, eth_hdr(skb)->h_dest); if (!node_dst) { - if (net_ratelimit()) - netdev_err(skb->dev, "%s: Unknown node\n", __func__); + WARN_ONCE(1, "%s: Unknown node\n", __func__); return; } if (port->type != node_dst->AddrB_port) diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index fe31df8dc804..3503c38954f9 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -557,7 +557,9 @@ ieee802154_llsec_parse_key_id(struct genl_info *info, desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]); if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) { - if (!info->attrs[IEEE802154_ATTR_PAN_ID]) + if (!info->attrs[IEEE802154_ATTR_PAN_ID] && + !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] || + info->attrs[IEEE802154_ATTR_HW_ADDR])) return -EINVAL; desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]); @@ -566,9 +568,6 @@ ieee802154_llsec_parse_key_id(struct genl_info *info, desc->device_addr.mode = IEEE802154_ADDR_SHORT; desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]); } else { - if (!info->attrs[IEEE802154_ATTR_HW_ADDR]) - return -EINVAL; - desc->device_addr.mode = IEEE802154_ADDR_LONG; desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]); } @@ 
-685,10 +684,8 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info) nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) || nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER, be32_to_cpu(params.frame_counter)) || - ieee802154_llsec_fill_key_id(msg, ¶ms.out_key)) { - rc = -ENOBUFS; + ieee802154_llsec_fill_key_id(msg, ¶ms.out_key)) goto out_free; - } dev_put(dev); diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c index 11f53dc0c1c0..77d73014bde3 100644 --- a/net/ieee802154/nl-phy.c +++ b/net/ieee802154/nl-phy.c @@ -249,10 +249,8 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info) } if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || - nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) { - rc = -EMSGSIZE; + nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) goto nla_put_failure; - } dev_put(dev); wpan_phy_put(phy); diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index 770073e40a2d..16ef0d9f566e 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -843,13 +843,8 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, goto nla_put_failure; #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL - if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) - goto out; - if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0) goto nla_put_failure; - -out: #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ genlmsg_end(msg, hdr); @@ -1372,9 +1367,6 @@ static int nl802154_set_llsec_params(struct sk_buff *skb, u32 changed = 0; int ret; - if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) - return -EOPNOTSUPP; - if (info->attrs[NL802154_ATTR_SEC_ENABLED]) { u8 enabled; @@ -1423,7 +1415,7 @@ static int nl802154_send_key(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -ENOBUFS; + return -1; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto 
nla_put_failure; @@ -1481,11 +1473,6 @@ nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb) if (err) return err; - if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) { - err = skb->len; - goto out_err; - } - if (!wpan_dev->netdev) { err = -EINVAL; goto out_err; @@ -1540,8 +1527,7 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info) struct ieee802154_llsec_key_id id = { }; u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { }; - if (!info->attrs[NL802154_ATTR_SEC_KEY] || - nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX, + if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy)) return -EINVAL; @@ -1591,8 +1577,7 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info) struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1]; struct ieee802154_llsec_key_id id; - if (!info->attrs[NL802154_ATTR_SEC_KEY] || - nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX, + if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy)) return -EINVAL; @@ -1614,7 +1599,7 @@ static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -ENOBUFS; + return -1; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; @@ -1658,11 +1643,6 @@ nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb) if (err) return err; - if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) { - err = skb->len; - goto out_err; - } - if (!wpan_dev->netdev) { err = -EINVAL; goto out_err; @@ -1750,9 +1730,6 @@ static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info) struct wpan_dev *wpan_dev = dev->ieee802154_ptr; struct ieee802154_llsec_device dev_desc; - if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) - return -EOPNOTSUPP; - if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE], &dev_desc) < 0) 
return -EINVAL; @@ -1768,8 +1745,7 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info) struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1]; __le64 extended_addr; - if (!info->attrs[NL802154_ATTR_SEC_DEVICE] || - nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX, + if (nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy)) return -EINVAL; @@ -1792,7 +1768,7 @@ static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -ENOBUFS; + return -1; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; @@ -1839,11 +1815,6 @@ nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb) if (err) return err; - if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) { - err = skb->len; - goto out_err; - } - if (!wpan_dev->netdev) { err = -EINVAL; goto out_err; @@ -1901,9 +1872,6 @@ static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info struct ieee802154_llsec_device_key key; __le64 extended_addr; - if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) - return -EOPNOTSUPP; - if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] || nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], @@ -1937,8 +1905,7 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info struct ieee802154_llsec_device_key key; __le64 extended_addr; - if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] || - nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX, + if (nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy)) return -EINVAL; @@ -1969,7 +1936,7 @@ static int nl802154_send_seclevel(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -ENOBUFS; + return -1; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto 
nla_put_failure; @@ -2013,11 +1980,6 @@ nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb) if (err) return err; - if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) { - err = skb->len; - goto out_err; - } - if (!wpan_dev->netdev) { err = -EINVAL; goto out_err; @@ -2103,9 +2065,6 @@ static int nl802154_add_llsec_seclevel(struct sk_buff *skb, struct wpan_dev *wpan_dev = dev->ieee802154_ptr; struct ieee802154_llsec_seclevel sl; - if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) - return -EOPNOTSUPP; - if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL], &sl) < 0) return -EINVAL; @@ -2121,9 +2080,6 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb, struct wpan_dev *wpan_dev = dev->ieee802154_ptr; struct ieee802154_llsec_seclevel sl; - if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) - return -EOPNOTSUPP; - if (!info->attrs[NL802154_ATTR_SEC_LEVEL] || llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL], &sl) < 0) diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 42ab1b61b513..cb6c0772ea36 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -983,11 +983,6 @@ static const struct proto_ops ieee802154_dgram_ops = { #endif }; -static void ieee802154_sock_destruct(struct sock *sk) -{ - skb_queue_purge(&sk->sk_receive_queue); -} - /* Create a socket. Initialise the socket, blank the addresses * set the state. 
*/ @@ -1028,7 +1023,7 @@ static int ieee802154_create(struct net *net, struct socket *sock, sock->ops = ops; sock_init_data(sock, sk); - sk->sk_destruct = ieee802154_sock_destruct; + /* FIXME: sk->sk_destruct */ sk->sk_family = PF_IEEE802154; /* Checksums on by default */ diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 1919e60c2e75..55eff963d1fe 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1278,11 +1278,8 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, udpfrag = proto == IPPROTO_UDP && !skb->encapsulation; ops = rcu_dereference(inet_offloads[proto]); - if (likely(ops && ops->callbacks.gso_segment)) { + if (likely(ops && ops->callbacks.gso_segment)) segs = ops->callbacks.gso_segment(skb, features); - if (!segs) - skb->network_header = skb_mac_header(skb) + nhoff - skb->head; - } if (IS_ERR_OR_NULL(segs)) goto out; @@ -1805,10 +1802,6 @@ static int __init inet_init(void) tcp_v4_init(); - /* Initialise per-cpu ipv4 mibs */ - if (init_ipv4_mibs()) - panic("%s: Cannot init ipv4 mibs\n", __func__); - /* Setup TCP slab cache for open requests. 
*/ tcp_init(); @@ -1837,6 +1830,12 @@ static int __init inet_init(void) if (init_inet_pernet_ops()) pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__); + /* + * Initialise per-cpu ipv4 mibs + */ + + if (init_ipv4_mibs()) + pr_crit("%s: Cannot init ipv4 mibs\n", __func__); ipv4_proc_init(); diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index e798e27b3c7d..0e83c5b08e0e 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -557,7 +557,6 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); - kfree(doi_def->map.std); break; } kfree(doi_def); diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 7accf1ebe947..edf007a20af4 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -2243,7 +2243,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name, free: kfree(t); out: - return -ENOMEM; + return -ENOBUFS; } static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf) diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index c01149331f46..632d28592933 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -299,7 +299,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb) .flowi4_iif = LOOPBACK_IFINDEX, .flowi4_oif = l3mdev_master_ifindex_rcu(dev), .daddr = ip_hdr(skb)->saddr, - .flowi4_tos = ip_hdr(skb)->tos & IPTOS_RT_MASK, + .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), .flowi4_scope = scope, .flowi4_mark = vmark ? 
skb->mark : 0, }; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 358d9dabda1d..d3685fa18246 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -461,23 +461,6 @@ static int icmp_multipath_hash_skb(const struct sk_buff *skb) #endif -/* - * The device used for looking up which routing table to use for sending an ICMP - * error is preferably the source whenever it is set, which should ensure the - * icmp error can be sent to the source host, else lookup using the routing - * table of the destination device, else use the main routing table (index 0). - */ -static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb) -{ - struct net_device *route_lookup_dev = NULL; - - if (skb->dev) - route_lookup_dev = skb->dev; - else if (skb_dst(skb)) - route_lookup_dev = skb_dst(skb)->dev; - return route_lookup_dev; -} - static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4, struct sk_buff *skb_in, @@ -486,7 +469,6 @@ static struct rtable *icmp_route_lookup(struct net *net, int type, int code, struct icmp_bxm *param) { - struct net_device *route_lookup_dev; struct rtable *rt, *rt2; struct flowi4 fl4_dec; int err; @@ -501,8 +483,7 @@ static struct rtable *icmp_route_lookup(struct net *net, fl4->flowi4_proto = IPPROTO_ICMP; fl4->fl4_icmp_type = type; fl4->fl4_icmp_code = code; - route_lookup_dev = icmp_get_route_lookup_dev(skb_in); - fl4->flowi4_oif = l3mdev_master_ifindex(route_lookup_dev); + fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev); security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); rt = __ip_route_output_key_hash(net, fl4, @@ -527,7 +508,7 @@ static struct rtable *icmp_route_lookup(struct net *net, if (err) goto relookup_failed; - if (inet_addr_type_dev_table(net, route_lookup_dev, + if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev, fl4_dec.saddr) == RTN_LOCAL) { rt2 = __ip_route_output_key(net, &fl4_dec); if (IS_ERR(rt2)) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 7b0bbda676b3..c67efa3e79dd 
100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -2631,7 +2631,6 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u rv = 1; } else if (im) { if (src_addr) { - spin_lock_bh(&im->lock); for (psf = im->sources; psf; psf = psf->sf_next) { if (psf->sf_inaddr == src_addr) break; @@ -2642,7 +2641,6 @@ int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u im->sfcount[MCAST_EXCLUDE]; else rv = im->sfcount[MCAST_EXCLUDE] != 0; - spin_unlock_bh(&im->lock); } else rv = 1; /* unspecified source; tentatively allow */ } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 63f7bacf628a..900ee28bda99 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -502,10 +502,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, static struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool csum) { - unsigned char *skb_checksum_start = skb->head + skb->csum_start; - - if (csum && skb_checksum_start < skb->data) - return ERR_PTR(-EINVAL); return iptunnel_handle_offloads(skb, csum, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 080470394612..0750126f3b7d 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -155,19 +155,12 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk, iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr); iph->saddr = saddr; iph->protocol = sk->sk_protocol; - /* Do not bother generating IPID for small packets (eg SYNACK) */ - if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) { + if (ip_dont_fragment(sk, &rt->dst)) { iph->frag_off = htons(IP_DF); iph->id = 0; } else { iph->frag_off = 0; - /* TCP packets here are SYNACK with fat IPv4/TCP options. - * Avoid using the hashed IP ident generator. 
- */ - if (sk->sk_protocol == IPPROTO_TCP) - iph->id = (__force __be16)prandom_u32(); - else - __ip_select_ident(net, iph, 1); + __ip_select_ident(net, iph, 1); } if (opt && opt->opt.optlen) { @@ -290,7 +283,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk if (skb_is_gso(skb)) return ip_finish_output_gso(net, sk, skb, mtu); - if (skb->len > mtu || IPCB(skb)->frag_max_size) + if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU)) return ip_fragment(net, sk, skb, mtu, ip_finish_output2); return ip_finish_output2(net, sk, skb); @@ -383,9 +376,8 @@ static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4) { BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) != offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr)); - - iph->saddr = fl4->saddr; - iph->daddr = fl4->daddr; + memcpy(&iph->saddr, &fl4->saddr, + sizeof(fl4->saddr) + sizeof(fl4->daddr)); } /* Note: skb->sk can be different from sk, in case of tunnels */ diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index dc92780f9e8c..3d9761516683 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -708,11 +708,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, goto tx_error; } - df = tnl_params->frag_off; - if (skb->protocol == htons(ETH_P_IP)) - df |= (inner_iph->frag_off & htons(IP_DF)); - - if (tnl_update_pmtu(dev, skb, rt, df, inner_iph)) { + if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) { ip_rt_put(rt); goto tx_error; } @@ -740,6 +736,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ttl = ip4_dst_hoplimit(&rt->dst); } + df = tnl_params->frag_off; + if (skb->protocol == htons(ETH_P_IP)) + df |= (inner_iph->frag_off&htons(IP_DF)); + max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) + rt->dst.header_len + ip_encap_hlen(&tunnel->encap); if (max_headroom > dev->needed_headroom) diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 173777aa5add..60f564db25a3 100644 --- 
a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -890,7 +890,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d /* - * Copy BOOTP-supplied string + * Copy BOOTP-supplied string if not already set. */ static int __init ic_bootp_string(char *dest, char *src, int len, int max) { @@ -941,15 +941,12 @@ static void __init ic_do_bootp_ext(u8 *ext) } break; case 12: /* Host name */ - if (!ic_host_name_set) { - ic_bootp_string(utsname()->nodename, ext+1, *ext, - __NEW_UTS_LEN); - ic_host_name_set = 1; - } + ic_bootp_string(utsname()->nodename, ext+1, *ext, + __NEW_UTS_LEN); + ic_host_name_set = 1; break; case 15: /* Domain name (DNS) */ - if (!ic_domain[0]) - ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain)); + ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain)); break; case 17: /* Root path */ if (!root_server_path[0]) diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index c4c2815b4f9b..1cb865fcc91b 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -266,9 +266,7 @@ static int __net_init ipmr_rules_init(struct net *net) return 0; err2: - rtnl_lock(); ipmr_free_table(mrt); - rtnl_unlock(); err1: fib_rules_unregister(ops); return err; diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index ea164fd61a7c..574697326ebc 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1349,8 +1349,6 @@ static int translate_compat_table(struct net *net, if (!newinfo) goto out_unlock; - memset(newinfo->entries, 0, size); - newinfo->number = compatr->num_entries; for (i = 0; i < NF_ARP_NUMHOOKS; i++) { newinfo->hook_entry[i] = compatr->hook_entry[i]; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 73b1d8e64658..53d664a7774c 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -964,6 +964,10 @@ copy_entries_to_user(unsigned int total_size, return PTR_ERR(counters); loc_cpu_entry = private->entries; + if 
(copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { + ret = -EFAULT; + goto free_counters; + } /* FIXME: use iterator macros --RR */ /* ... then go back and fix counters and names */ @@ -973,10 +977,6 @@ copy_entries_to_user(unsigned int total_size, const struct xt_entry_target *t; e = (struct ipt_entry *)(loc_cpu_entry + off); - if (copy_to_user(userptr + off, e, sizeof(*e))) { - ret = -EFAULT; - goto free_counters; - } if (copy_to_user(userptr + off + offsetof(struct ipt_entry, counters), &counters[num], @@ -990,14 +990,23 @@ copy_entries_to_user(unsigned int total_size, i += m->u.match_size) { m = (void *)e + i; - if (xt_match_to_user(m, userptr + off + i)) { + if (copy_to_user(userptr + off + i + + offsetof(struct xt_entry_match, + u.user.name), + m->u.kernel.match->name, + strlen(m->u.kernel.match->name)+1) + != 0) { ret = -EFAULT; goto free_counters; } } t = ipt_get_target_c(e); - if (xt_target_to_user(t, userptr + off + e->target_offset)) { + if (copy_to_user(userptr + off + e->target_offset + + offsetof(struct xt_entry_target, + u.user.name), + t->u.kernel.target->name, + strlen(t->u.kernel.target->name)+1) != 0) { ret = -EFAULT; goto free_counters; } @@ -1601,8 +1610,6 @@ translate_compat_table(struct net *net, if (!newinfo) goto out_unlock; - memset(newinfo->entries, 0, size); - newinfo->number = compatr->num_entries; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = compatr->hook_entry[i]; diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 28bcde0a2749..16599bae11dd 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -478,7 +478,6 @@ static struct xt_target clusterip_tg_reg __read_mostly = { .checkentry = clusterip_tg_check, .destroy = clusterip_tg_destroy, .targetsize = sizeof(struct ipt_clusterip_tgt_info), - .usersize = offsetof(struct ipt_clusterip_tgt_info, config), #ifdef CONFIG_COMPAT .compatsize = sizeof(struct compat_ipt_clusterip_tgt_info), 
#endif /* CONFIG_COMPAT */ diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index 32a363465e0a..78cc64eddfc1 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c @@ -92,7 +92,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) flow.saddr = rpfilter_get_saddr(iph->daddr); flow.flowi4_oif = 0; flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; - flow.flowi4_tos = iph->tos & IPTOS_RT_MASK; + flow.flowi4_tos = RT_TOS(iph->tos); flow.flowi4_scope = RT_SCOPE_UNIVERSE; return rpfilter_lookup_reverse(par->net, &flow, par->in, info->flags) ^ invert; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index c3f808602b65..3c698f575eb3 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -979,7 +979,6 @@ bool ping_rcv(struct sk_buff *skb) struct sock *sk; struct net *net = dev_net(skb->dev); struct icmphdr *icmph = icmp_hdr(skb); - bool rc = false; /* We assume the packet has already been checked by icmp_rcv */ @@ -994,15 +993,14 @@ bool ping_rcv(struct sk_buff *skb) struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); pr_debug("rcv on socket %p\n", sk); - if (skb2 && !ping_queue_rcv_skb(sk, skb2)) - rc = true; + if (skb2) + ping_queue_rcv_skb(sk, skb2); sock_put(sk); + return true; } + pr_debug("no socket, dropping\n"); - if (!rc) - pr_debug("no socket, dropping\n"); - - return rc; + return false; } EXPORT_SYMBOL_GPL(ping_rcv); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 81ade975db5f..051ed5732a81 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -709,7 +709,6 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) int ret = -EINVAL; int chk_addr_ret; - lock_sock(sk); if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in)) goto out; chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); @@ -722,9 +721,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_saddr = 0; 
/* Use device */ sk_dst_reset(sk); ret = 0; -out: - release_sock(sk); - return ret; +out: return ret; } /* diff --git a/net/ipv4/route.c b/net/ipv4/route.c index f79f9a6dd046..a9df7487c929 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -70,7 +70,6 @@ #include #include #include -#include #include #include #include @@ -464,10 +463,8 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, return neigh_create(&arp_tbl, pkey, dev); } -/* Hash tables of size 2048..262144 depending on RAM size. - * Each bucket uses 8 bytes. - */ -static u32 ip_idents_mask __read_mostly; +#define IP_IDENTS_SZ 2048u + static atomic_t *ip_idents __read_mostly; static u32 *ip_tstamps __read_mostly; @@ -477,16 +474,12 @@ static u32 *ip_tstamps __read_mostly; */ u32 ip_idents_reserve(u32 hash, int segs) { - u32 bucket, old, now = (u32)jiffies; - atomic_t *p_id; - u32 *p_tstamp; + u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ; + atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; + u32 old = ACCESS_ONCE(*p_tstamp); + u32 now = (u32)jiffies; u32 delta = 0; - bucket = hash & ip_idents_mask; - p_tstamp = ip_tstamps + bucket; - p_id = ip_idents + bucket; - old = ACCESS_ONCE(*p_tstamp); - if (old != now && cmpxchg(p_tstamp, old, now) == old) delta = prandom_u32_max(now - old); @@ -600,25 +593,18 @@ static void fnhe_flush_routes(struct fib_nh_exception *fnhe) } } -static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash) +static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash) { - struct fib_nh_exception __rcu **fnhe_p, **oldest_p; - struct fib_nh_exception *fnhe, *oldest = NULL; + struct fib_nh_exception *fnhe, *oldest; - for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) { - fnhe = rcu_dereference_protected(*fnhe_p, - lockdep_is_held(&fnhe_lock)); - if (!fnhe) - break; - if (!oldest || - time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) { + oldest = rcu_dereference(hash->chain); + for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe; + fnhe 
= rcu_dereference(fnhe->fnhe_next)) { + if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) oldest = fnhe; - oldest_p = fnhe_p; - } } fnhe_flush_routes(oldest); - *oldest_p = oldest->fnhe_next; - kfree_rcu(oldest, rcu); + return oldest; } static inline u32 fnhe_hashfun(__be32 daddr) @@ -695,21 +681,16 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, if (rt) fill_route_from_fnhe(rt, fnhe); } else { - /* Randomize max depth to avoid some side channels attacks. */ - int max_depth = FNHE_RECLAIM_DEPTH + - prandom_u32_max(FNHE_RECLAIM_DEPTH); - - while (depth > max_depth) { - fnhe_remove_oldest(hash); - depth--; + if (depth > FNHE_RECLAIM_DEPTH) + fnhe = fnhe_oldest(hash); + else { + fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC); + if (!fnhe) + goto out_unlock; + + fnhe->fnhe_next = hash->chain; + rcu_assign_pointer(hash->chain, fnhe); } - - fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC); - if (!fnhe) - goto out_unlock; - - fnhe->fnhe_next = hash->chain; - fnhe->fnhe_genid = genid; fnhe->fnhe_daddr = daddr; fnhe->fnhe_gw = gw; @@ -717,8 +698,6 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, fnhe->fnhe_mtu_locked = lock; fnhe->fnhe_expires = expires; - rcu_assign_pointer(hash->chain, fnhe); - /* Exception created; mark the cached routes for the nexthop * stale, so anyone caching it rechecks if this exception * applies to them. 
@@ -2974,27 +2953,18 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly; int __init ip_rt_init(void) { - void *idents_hash; int rc = 0; int cpu; - /* For modern hosts, this will use 2 MB of memory */ - idents_hash = alloc_large_system_hash("IP idents", - sizeof(*ip_idents) + sizeof(*ip_tstamps), - 0, - 16, /* one bucket per 64 KB */ - 0, - NULL, - &ip_idents_mask, - 2048, - 256*1024); - - ip_idents = idents_hash; + ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL); + if (!ip_idents) + panic("IP: failed to allocate ip_idents\n"); - prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents)); + prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents)); - ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents); - memset(ip_tstamps, 0, (ip_idents_mask + 1) * sizeof(*ip_tstamps)); + ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL); + if (!ip_tstamps) + panic("IP: failed to allocate ip_tstamps\n"); for_each_possible_cpu(cpu) { struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu); diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index e0b3b194b604..9fb3a5e83a7c 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -342,6 +342,8 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) return; if (tcp_in_slow_start(tp)) { + if (hystart && after(ack, ca->end_seq)) + bictcp_hystart_reset(sk); acked = tcp_slow_start(tp, acked); if (!acked) return; @@ -392,9 +394,6 @@ static void hystart_update(struct sock *sk, u32 delay) if (ca->found & hystart_detect) return; - if (after(tp->snd_una, ca->end_seq)) - bictcp_hystart_reset(sk); - if (hystart_detect & HYSTART_ACK_TRAIN) { u32 now = bictcp_clock(); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index e373dcdab5e2..161896d6dabb 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -277,7 +277,7 @@ void tcp_v4_mtu_reduced(struct sock *sk) if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) return; - mtu = 
READ_ONCE(tcp_sk(sk)->mtu_info); + mtu = tcp_sk(sk)->mtu_info; dst = inet_csk_update_pmtu(sk, mtu); if (!dst) return; @@ -444,7 +444,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) if (sk->sk_state == TCP_LISTEN) goto out; - WRITE_ONCE(tp->mtu_info, info); + tp->mtu_info = info; if (!sock_owned_by_user(sk)) { tcp_v4_mtu_reduced(sk); } else { @@ -2044,7 +2044,6 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos) static void *tcp_seek_last_pos(struct seq_file *seq) { struct tcp_iter_state *st = seq->private; - int bucket = st->bucket; int offset = st->offset; int orig_num = st->num; void *rc = NULL; @@ -2055,7 +2054,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq) break; st->state = TCP_SEQ_STATE_LISTENING; rc = listening_get_next(seq, NULL); - while (offset-- && rc && bucket == st->bucket) + while (offset-- && rc) rc = listening_get_next(seq, rc); if (rc) break; @@ -2066,7 +2065,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq) if (st->bucket > tcp_hashinfo.ehash_mask) break; rc = established_get_first(seq); - while (offset-- && rc && bucket == st->bucket) + while (offset-- && rc) rc = established_get_next(seq, rc); } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 17388adedc7d..26de4da6341f 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1353,7 +1353,6 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu) return __tcp_mtu_to_mss(sk, pmtu) - (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); } -EXPORT_SYMBOL(tcp_mtu_to_mss); /* Inverse of above */ int tcp_mss_to_mtu(struct sock *sk, int mss) @@ -1502,8 +1501,7 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) * window, and remember whether we were cwnd-limited then. 
*/ if (!before(tp->snd_una, tp->max_packets_seq) || - tp->packets_out > tp->max_packets_out || - is_cwnd_limited) { + tp->packets_out > tp->max_packets_out) { tp->max_packets_out = tp->packets_out; tp->max_packets_seq = tp->snd_nxt; tp->is_cwnd_limited = is_cwnd_limited; @@ -2174,10 +2172,6 @@ repair: break; } - is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); - if (likely(sent_pkts || is_cwnd_limited)) - tcp_cwnd_validate(sk, is_cwnd_limited); - if (likely(sent_pkts)) { if (tcp_in_cwnd_reduction(sk)) tp->prr_out += sent_pkts; @@ -2185,6 +2179,8 @@ repair: /* Send one loss probe per tail loss episode. */ if (push_one != 2) tcp_schedule_loss_probe(sk); + is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); + tcp_cwnd_validate(sk, is_cwnd_limited); return false; } return !tp->packets_out && tcp_send_head(sk); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 3f805e0b0561..3ff09be87cf2 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2490,7 +2490,7 @@ int udp4_seq_show(struct seq_file *seq, void *v) { seq_setwidth(seq, 127); if (v == SEQ_START_TOKEN) - seq_puts(seq, " sl local_address rem_address st tx_queue " + seq_puts(seq, " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode ref pointer drops"); else { diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 2a37f367dc04..6dfc3daf7c21 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -300,7 +300,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, int flush = 1; if (NAPI_GRO_CB(skb)->encap_mark || - (uh->check && skb->ip_summed != CHECKSUM_PARTIAL && + (skb->ip_summed != CHECKSUM_PARTIAL && NAPI_GRO_CB(skb)->csum_cnt == 0 && !NAPI_GRO_CB(skb)->csum_valid)) goto out; diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 016fbf98ba7e..0a6bcf7e7b54 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -16,7 +16,6 @@ #include #include #include -#include 
static struct xfrm_policy_afinfo xfrm4_policy_afinfo; @@ -128,7 +127,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) fl4->flowi4_proto = iph->protocol; fl4->daddr = reverse ? iph->saddr : iph->daddr; fl4->saddr = reverse ? iph->daddr : iph->saddr; - fl4->flowi4_tos = iph->tos & ~INET_ECN_MASK; + fl4->flowi4_tos = iph->tos; if (!ip_is_fragment(iph)) { switch (iph->protocol) { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 55c1cbfdf1ff..441f6519e2e7 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2302,7 +2302,6 @@ static void addrconf_add_mroute(struct net_device *dev) .fc_dst_len = 8, .fc_flags = RTF_UP, .fc_nlinfo.nl_net = dev_net(dev), - .fc_protocol = RTPROT_KERNEL, }; ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); @@ -2397,12 +2396,6 @@ static void manage_tempaddrs(struct inet6_dev *idev, } } -static bool is_addr_mode_generate_stable(struct inet6_dev *idev) -{ - return idev->addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY || - idev->addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM; -} - void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) { struct prefix_info *pinfo; @@ -2519,7 +2512,8 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) in6_dev->token.s6_addr + 8, 8); read_unlock_bh(&in6_dev->lock); tokenized = true; - } else if (is_addr_mode_generate_stable(in6_dev) && + } else if (in6_dev->addr_gen_mode == + IN6_ADDR_GEN_MODE_STABLE_PRIVACY && !ipv6_generate_stable_address(&addr, 0, in6_dev)) { addr_flags |= IFA_F_STABLE_PRIVACY; @@ -3119,17 +3113,6 @@ retry: return 0; } -static void ipv6_gen_mode_random_init(struct inet6_dev *idev) -{ - struct ipv6_stable_secret *s = &idev->cnf.stable_secret; - - if (s->initialized) - return; - s = &idev->cnf.stable_secret; - get_random_bytes(&s->secret, sizeof(s->secret)); - s->initialized = true; -} - static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route) { struct in6_addr addr; @@ -3140,18 
+3123,13 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route) ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0); - switch (idev->addr_gen_mode) { - case IN6_ADDR_GEN_MODE_RANDOM: - ipv6_gen_mode_random_init(idev); - /* fallthrough */ - case IN6_ADDR_GEN_MODE_STABLE_PRIVACY: + if (idev->addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY) { if (!ipv6_generate_stable_address(&addr, 0, idev)) addrconf_add_linklocal(idev, &addr, IFA_F_STABLE_PRIVACY); else if (prefix_route) addrconf_prefix_route(&addr, 64, idev->dev, 0, 0); - break; - case IN6_ADDR_GEN_MODE_EUI64: + } else if (idev->addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64) { /* addrconf_add_linklocal also adds a prefix_route and we * only need to care about prefix routes if ipv6_generate_eui64 * couldn't generate one. @@ -3160,11 +3138,6 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route) addrconf_add_linklocal(idev, &addr, 0); else if (prefix_route) addrconf_prefix_route(&addr, 64, idev->dev, 0, 0); - break; - case IN6_ADDR_GEN_MODE_NONE: - default: - /* will not add any link local address */ - break; } } @@ -3182,7 +3155,6 @@ static void addrconf_dev_config(struct net_device *dev) (dev->type != ARPHRD_IEEE1394) && (dev->type != ARPHRD_TUNNEL6) && (dev->type != ARPHRD_6LOWPAN) && - (dev->type != ARPHRD_NONE) && (dev->type != ARPHRD_RAWIP) && (dev->type != ARPHRD_INFINIBAND)) { /* Alas, we support only Ethernet autoconfiguration. 
*/ @@ -3193,11 +3165,6 @@ static void addrconf_dev_config(struct net_device *dev) if (IS_ERR(idev)) return; - /* this device type has no EUI support */ - if (dev->type == ARPHRD_NONE && - idev->addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64) - idev->addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM; - addrconf_addr_gen(idev, false); } @@ -5074,8 +5041,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla) if (mode != IN6_ADDR_GEN_MODE_EUI64 && mode != IN6_ADDR_GEN_MODE_NONE && - mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY && - mode != IN6_ADDR_GEN_MODE_RANDOM) + mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY) return -EINVAL; if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY && diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 17eee5083548..80e03a50d92d 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -350,6 +350,7 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net, if (!(nt->parms.o_flags & GRE_SEQ)) dev->features |= NETIF_F_LLTX; + dev_hold(dev); ip6gre_tunnel_link(ign, nt); return nt; @@ -1313,6 +1314,8 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev) strcpy(tunnel->parms.name, dev->name); tunnel->hlen = sizeof(struct ipv6hdr) + 4; + + dev_hold(dev); } diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 31ac3c56da4b..0089407f5ebf 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -161,6 +161,16 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt if (ipv6_addr_is_multicast(&hdr->saddr)) goto err; + /* While RFC4291 is not explicit about v4mapped addresses + * in IPv6 headers, it seems clear linux dual-stack + * model can not deal properly with these. + * Security models could be fooled by ::ffff:127.0.0.1 for example. 
+ * + * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02 + */ + if (ipv6_addr_v4mapped(&hdr->saddr)) + goto err; + skb->transport_header = skb->network_header + sizeof(*hdr); IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 629d51230f51..59127b41414f 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -109,8 +109,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, if (likely(ops && ops->callbacks.gso_segment)) { skb_reset_transport_header(skb); segs = ops->callbacks.gso_segment(skb, features); - if (!segs) - skb->network_header = skb_mac_header(skb) + nhoff - skb->head; } if (IS_ERR(segs)) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 409d0d3616ec..b0dd0aec641c 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1258,6 +1258,8 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, if (np->frag_size) mtu = np->frag_size; } + if (mtu < IPV6_MIN_MTU) + return -EINVAL; cork->base.fragsize = mtu; if (dst_allfrag(rt->dst.path)) cork->base.flags |= IPCORK_ALLFRAG; @@ -1305,6 +1307,8 @@ static int __ip6_append_data(struct sock *sk, fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + (opt ? opt->opt_nflen : 0); + maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - + sizeof(struct frag_hdr); headersize = sizeof(struct ipv6hdr) + (opt ? 
opt->opt_flen + opt->opt_nflen : 0) + @@ -1312,13 +1316,6 @@ static int __ip6_append_data(struct sock *sk, sizeof(struct frag_hdr) : 0) + rt->rt6i_nfheader_len; - if (mtu < fragheaderlen || - ((mtu - fragheaderlen) & ~7) + fragheaderlen < sizeof(struct frag_hdr)) - goto emsgsize; - - maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - - sizeof(struct frag_hdr); - /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit * the first fragment */ diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 842bdb2d3d8c..48a2605bed28 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -261,6 +261,7 @@ static int ip6_tnl_create2(struct net_device *dev) strcpy(t->parms.name, dev->name); + dev_hold(dev); ip6_tnl_link(ip6n, t); return 0; @@ -917,12 +918,12 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t, ldev = dev_get_by_index_rcu(net, p->link); if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0))) - pr_warn_ratelimited("%s xmit: Local address not yet configured!\n", - p->name); + pr_warn("%s xmit: Local address not yet configured!\n", + p->name); else if (!ipv6_addr_is_multicast(raddr) && unlikely(ipv6_chk_addr(net, raddr, NULL, 0))) - pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n", - p->name); + pr_warn("%s xmit: Routing loop! 
Remote address found on this node!\n", + p->name); else ret = 1; rcu_read_unlock(); @@ -1583,7 +1584,6 @@ ip6_tnl_dev_init_gen(struct net_device *dev) return ret; } - dev_hold(dev); return 0; } @@ -1617,6 +1617,7 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); t->parms.proto = IPPROTO_IPV6; + dev_hold(dev); rcu_assign_pointer(ip6n->tnls_wc[0], t); return 0; diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 9df1947e79eb..8151c7b9659b 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -196,6 +196,7 @@ static int vti6_tnl_create2(struct net_device *dev) strcpy(t->parms.name, dev->name); + dev_hold(dev); vti6_tnl_link(ip6n, t); return 0; @@ -760,8 +761,6 @@ vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct net *net = dev_net(dev); struct vti6_net *ip6n = net_generic(net, vti6_net_id); - memset(&p1, 0, sizeof(p1)); - switch (cmd) { case SIOCGETTUNNEL: if (dev == ip6n->fb_tnl_dev) { @@ -902,7 +901,6 @@ static inline int vti6_dev_init_gen(struct net_device *dev) dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; - dev_hold(dev); return 0; } @@ -934,6 +932,7 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev) struct vti6_net *ip6n = net_generic(net, vti6_net_id); t->parms.proto = IPPROTO_IPV6; + dev_hold(dev); rcu_assign_pointer(ip6n->tnls_wc[0], t); return 0; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 1ba6f7df5a73..20812e8b24dd 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -251,9 +251,7 @@ static int __net_init ip6mr_rules_init(struct net *net) return 0; err2: - rtnl_lock(); ip6mr_free_table(mrt); - rtnl_unlock(); err1: fib_rules_unregister(ops); return err; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 636425999aac..2d28f0b54494 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1573,7 +1573,10 @@ static struct sk_buff *mld_newpack(struct inet6_dev 
*idev, unsigned int mtu) IPV6_TLV_PADN, 0 }; /* we assume size > sizeof(ra) here */ + /* limit our allocations to order-0 page */ + size = min_t(int, size, SKB_MAX_ORDER(0, 0)); skb = sock_alloc_send_skb(sk, size, 1, &err); + if (!skb) return NULL; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index f57b72771e17..2393e1e09d69 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -343,7 +343,6 @@ ip6t_do_table(struct sk_buff *skb, * things we don't know, ie. tcp syn flag or ports). If the * rule is also a fragment-specific rule, non-fragments won't * match it. */ - acpar.fragoff = 0; acpar.hotdrop = false; acpar.net = state->net; acpar.in = state->in; @@ -980,6 +979,10 @@ copy_entries_to_user(unsigned int total_size, return PTR_ERR(counters); loc_cpu_entry = private->entries; + if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { + ret = -EFAULT; + goto free_counters; + } /* FIXME: use iterator macros --RR */ /* ... 
then go back and fix counters and names */ @@ -989,10 +992,6 @@ copy_entries_to_user(unsigned int total_size, const struct xt_entry_target *t; e = (struct ip6t_entry *)(loc_cpu_entry + off); - if (copy_to_user(userptr + off, e, sizeof(*e))) { - ret = -EFAULT; - goto free_counters; - } if (copy_to_user(userptr + off + offsetof(struct ip6t_entry, counters), &counters[num], @@ -1006,14 +1005,23 @@ copy_entries_to_user(unsigned int total_size, i += m->u.match_size) { m = (void *)e + i; - if (xt_match_to_user(m, userptr + off + i)) { + if (copy_to_user(userptr + off + i + + offsetof(struct xt_entry_match, + u.user.name), + m->u.kernel.match->name, + strlen(m->u.kernel.match->name)+1) + != 0) { ret = -EFAULT; goto free_counters; } } t = ip6t_get_target_c(e); - if (xt_target_to_user(t, userptr + off + e->target_offset)) { + if (copy_to_user(userptr + off + e->target_offset + + offsetof(struct xt_entry_target, + u.user.name), + t->u.kernel.target->name, + strlen(t->u.kernel.target->name)+1) != 0) { ret = -EFAULT; goto free_counters; } @@ -1613,8 +1621,6 @@ translate_compat_table(struct net *net, if (!newinfo) goto out_unlock; - memset(newinfo->entries, 0, size); - newinfo->number = compatr->num_entries; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = compatr->hook_entry[i]; diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c index a379d2f79b19..590f767db5d4 100644 --- a/net/ipv6/netfilter/ip6t_NPT.c +++ b/net/ipv6/netfilter/ip6t_NPT.c @@ -112,7 +112,6 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = { .table = "mangle", .target = ip6t_snpt_tg, .targetsize = sizeof(struct ip6t_npt_tginfo), - .usersize = offsetof(struct ip6t_npt_tginfo, adjustment), .checkentry = ip6t_npt_checkentry, .family = NFPROTO_IPV6, .hooks = (1 << NF_INET_LOCAL_IN) | @@ -124,7 +123,6 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = { .table = "mangle", .target = ip6t_dnpt_tg, .targetsize = sizeof(struct ip6t_npt_tginfo), - 
.usersize = offsetof(struct ip6t_npt_tginfo, adjustment), .checkentry = ip6t_npt_checkentry, .family = NFPROTO_IPV6, .hooks = (1 << NF_INET_PRE_ROUTING) | diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index e2de4b0479f6..6b896cc9604e 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -14,11 +14,29 @@ static u32 __ipv6_select_ident(struct net *net, const struct in6_addr *dst, const struct in6_addr *src) { - u32 id; - - do { - id = prandom_u32(); - } while (!id); + const struct { + struct in6_addr dst; + struct in6_addr src; + } __aligned(SIPHASH_ALIGNMENT) combined = { + .dst = *dst, + .src = *src, + }; + u32 hash, id; + + /* Note the following code is not safe, but this is okay. */ + if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key))) + get_random_bytes(&net->ipv4.ip_id_key, + sizeof(net->ipv4.ip_id_key)); + + hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key); + + /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve, + * set the hight order instead thus minimizing possible future + * collisions. 
+ */ + id = ip_idents_reserve(hash, 1); + if (unlikely(!id)) + id = 1 << 31; return id; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index e4a77eaa7a14..ca2537119570 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1770,37 +1770,6 @@ static int ip6_convert_metrics(struct mx6_config *mxc, return -EINVAL; } -static struct rt6_info *ip6_nh_lookup_table(struct net *net, - struct fib6_config *cfg, - const struct in6_addr *gw_addr) -{ - struct flowi6 fl6 = { - .flowi6_oif = cfg->fc_ifindex, - .daddr = *gw_addr, - .saddr = cfg->fc_prefsrc, - }; - struct fib6_table *table; - struct rt6_info *rt; - int flags = 0; - - table = fib6_get_table(net, cfg->fc_table); - if (!table) - return NULL; - - if (!ipv6_addr_any(&cfg->fc_prefsrc)) - flags |= RT6_LOOKUP_F_HAS_SADDR; - - rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags); - - /* if table lookup failed, fall back to full lookup */ - if (rt == net->ipv6.ip6_null_entry) { - ip6_rt_put(rt); - rt = NULL; - } - - return rt; -} - static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg) { struct net *net = cfg->fc_nlinfo.nl_net; @@ -1976,7 +1945,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg) rt->rt6i_gateway = *gw_addr; if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) { - struct rt6_info *grt = NULL; + struct rt6_info *grt; /* IPv6 strictly inhibits using not link-local addresses as nexthop address. @@ -1988,12 +1957,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg) if (!(gwa_type & IPV6_ADDR_UNICAST)) goto out; - if (cfg->fc_table) - grt = ip6_nh_lookup_table(net, cfg, gw_addr); - - if (!grt) - grt = rt6_lookup(net, gw_addr, NULL, - cfg->fc_ifindex, 1); + grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1); err = -EHOSTUNREACH; if (!grt) @@ -2991,11 +2955,9 @@ static int ip6_route_multipath_add(struct fib6_config *cfg) * nexthops have been replaced by first new, the rest should * be added to it. 
*/ - if (cfg->fc_nlinfo.nlh) { - cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | - NLM_F_REPLACE); - cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE; - } + cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | + NLM_F_REPLACE); + cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE; nhn++; } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 6a5442fdbd51..8bab7e64ffcf 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -209,6 +209,8 @@ static int ipip6_tunnel_create(struct net_device *dev) dev->rtnl_link_ops = &sit_link_ops; + dev_hold(dev); + ipip6_tunnel_link(sitn, t); return 0; @@ -1397,7 +1399,7 @@ static int ipip6_tunnel_init(struct net_device *dev) dev->tstats = NULL; return err; } - dev_hold(dev); + return 0; } @@ -1413,6 +1415,7 @@ static void __net_init ipip6_fb_tunnel_init(struct net_device *dev) iph->ihl = 5; iph->ttl = 64; + dev_hold(dev); rcu_assign_pointer(sitn->tunnels_wc[0], tunnel); } @@ -1581,11 +1584,8 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev, } #ifdef CONFIG_IPV6_SIT_6RD - if (ipip6_netlink_6rd_parms(data, &ip6rd)) { + if (ipip6_netlink_6rd_parms(data, &ip6rd)) err = ipip6_tunnel_update_6rd(nt, &ip6rd); - if (err < 0) - unregister_netdevice_queue(dev, NULL); - } #endif return err; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 857d9e14607a..ae5a5d06c218 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -309,20 +309,11 @@ failure: static void tcp_v6_mtu_reduced(struct sock *sk) { struct dst_entry *dst; - u32 mtu; if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) return; - mtu = READ_ONCE(tcp_sk(sk)->mtu_info); - - /* Drop requests trying to increase our current mss. - * Check done in __ip6_rt_update_pmtu() is too late. 
- */ - if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache) - return; - - dst = inet6_csk_update_pmtu(sk, mtu); + dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info); if (!dst) return; @@ -401,8 +392,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, } if (type == ICMPV6_PKT_TOOBIG) { - u32 mtu = ntohl(info); - /* We are not interested in TCP_LISTEN and open_requests * (SYN-ACKs send out by Linux are always <576bytes so * they should go through unfragmented). @@ -413,11 +402,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (!ip6_sk_accept_pmtu(sk)) goto out; - if (mtu < IPV6_MIN_MTU) - goto out; - - WRITE_ONCE(tp->mtu_info, mtu); - + tp->mtu_info = ntohl(info); if (!sock_owned_by_user(sk)) tcp_v6_mtu_reduced(sk); else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, @@ -995,11 +980,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) if (!ipv6_unicast_destination(skb)) goto drop; - if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) { - IP6_INC_STATS_BH(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS); - return 0; - } - return tcp_conn_request(&tcp6_request_sock_ops, &tcp_request_sock_ipv6_ops, sk, skb); diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index ef6cc9eb0e45..b2dc9a820c6a 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -141,7 +141,7 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct xfrm_state *x = dst->xfrm; - unsigned int mtu; + int mtu; bool toobig; #ifdef CONFIG_NETFILTER diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 78a4b9dd6167..f94107e27b5e 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1724,7 +1724,7 @@ static int iucv_callback_connreq(struct iucv_path *path, } /* Create the new socket */ - nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0); + nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0); if (!nsk) { err = 
pr_iucv->path_sever(path, user_data); iucv_path_free(path); @@ -1934,7 +1934,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) goto out; } - nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0); + nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0); bh_lock_sock(sk); if ((sk->sk_state != IUCV_LISTEN) || sk_acceptq_is_full(sk) || diff --git a/net/key/af_key.c b/net/key/af_key.c index 52ab3c3c11d8..54ff2c50e0e9 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2923,7 +2923,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t) break; if (!aalg->pfkey_supported) continue; - if (aalg_tmpl_set(t, aalg)) + if (aalg_tmpl_set(t, aalg) && aalg->available) sz += sizeof(struct sadb_comb); } return sz + sizeof(struct sadb_prop); @@ -2941,7 +2941,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) if (!ealg->pfkey_supported) continue; - if (!(ealg_tmpl_set(t, ealg))) + if (!(ealg_tmpl_set(t, ealg) && ealg->available)) continue; for (k = 1; ; k++) { @@ -2952,7 +2952,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t) if (!aalg->pfkey_supported) continue; - if (aalg_tmpl_set(t, aalg)) + if (aalg_tmpl_set(t, aalg) && aalg->available) sz += sizeof(struct sadb_comb); } } diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 9a85b0133991..653892ea8f14 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -990,10 +990,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, } if (tunnel->version == L2TP_HDR_VER_3 && - l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) { - l2tp_session_dec_refcount(session); + l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) goto error; - } l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook); l2tp_session_dec_refcount(session); diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c index 7cbb77b7479a..ba4d015bd1a6 100644 --- a/net/lapb/lapb_out.c +++ b/net/lapb/lapb_out.c @@ -87,8 +87,7 @@ void lapb_kick(struct 
lapb_cb *lapb) skb = skb_dequeue(&lapb->write_queue); do { - skbn = skb_copy(skb, GFP_ATOMIC); - if (!skbn) { + if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { skb_queue_head(&lapb->write_queue, skb); break; } diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 82b07bc43071..f613a1007107 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -96,16 +96,8 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr) { u8 rc = LLC_PDU_LEN_U; - if (addr->sllc_test) + if (addr->sllc_test || addr->sllc_xid) rc = LLC_PDU_LEN_U; - else if (addr->sllc_xid) - /* We need to expand header to sizeof(struct llc_xid_info) - * since llc_pdu_init_as_xid_cmd() sets 4,5,6 bytes of LLC header - * as XID PDU. In llc_ui_sendmsg() we reserved header size and then - * filled all other space with user data. If we won't reserve this - * bytes, llc_pdu_init_as_xid_cmd() will overwrite user data - */ - rc = LLC_PDU_LEN_U_XID; else if (sk->sk_type == SOCK_STREAM) rc = LLC_PDU_LEN_I; return rc; diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c index 9fa3342c7a82..7ae4cc684d3a 100644 --- a/net/llc/llc_s_ac.c +++ b/net/llc/llc_s_ac.c @@ -79,7 +79,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb) struct llc_sap_state_ev *ev = llc_sap_ev(skb); int rc; - llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap, + llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0); rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 3d49ffe8a34d..4932e9f243a2 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -109,7 +109,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, mgmt->u.action.u.addba_req.start_seq_num = cpu_to_le16(start_seq_num << 4); - ieee80211_tx_skb_tid(sdata, skb, tid); + ieee80211_tx_skb(sdata, skb); } void ieee80211_send_bar(struct 
ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index c5be6bf2f00d..8739e4f85c19 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2448,14 +2448,14 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, continue; for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { - if (sdata->rc_rateidx_mcs_mask[i][j] != 0xff) { + if (~sdata->rc_rateidx_mcs_mask[i][j]) { sdata->rc_has_mcs_mask[i] = true; break; } } for (j = 0; j < NL80211_VHT_NSS_MAX; j++) { - if (sdata->rc_rateidx_vht_mcs_mask[i][j] != 0xffff) { + if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) { sdata->rc_has_vht_mcs_mask[i] = true; break; } diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c index 5d097ae26b70..df2e4e311217 100644 --- a/net/mac80211/driver-ops.c +++ b/net/mac80211/driver-ops.c @@ -128,11 +128,8 @@ int drv_sta_state(struct ieee80211_local *local, } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) { ret = drv_sta_add(local, sdata, &sta->sta); - if (ret == 0) { + if (ret == 0) sta->uploaded = true; - if (rcu_access_pointer(sta->sta.rates)) - drv_sta_rate_tbl_update(local, sdata, &sta->sta); - } } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) { drv_sta_remove(local, sdata, &sta->sta); diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 95fcf57e6567..4499868c19e6 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -1860,8 +1860,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) /* remove beacon */ kfree(sdata->u.ibss.ie); - sdata->u.ibss.ie = NULL; - sdata->u.ibss.ie_len = 0; /* on the next join, re-program HT parameters */ memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa)); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 3a91f32d1eda..b10fba4a9613 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -51,6 +51,12 @@ struct ieee80211_local; #define IEEE80211_ENCRYPT_HEADROOM 8 
#define IEEE80211_ENCRYPT_TAILROOM 18 +/* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent + * reception of at least three fragmented frames. This limit can be increased + * by changing this define, at the cost of slower frame reassembly and + * increased memory use (about 2 kB of RAM per entry). */ +#define IEEE80211_FRAGMENT_MAX 4 + /* power level hasn't been configured (or set to automatic) */ #define IEEE80211_UNSET_POWER_LEVEL INT_MIN @@ -79,6 +85,18 @@ struct ieee80211_local; #define IEEE80211_DEAUTH_FRAME_LEN (24 /* hdr */ + 2 /* reason */) +struct ieee80211_fragment_entry { + struct sk_buff_head skb_list; + unsigned long first_frag_time; + u16 seq; + u16 extra_len; + u16 last_frag; + u8 rx_queue; + bool check_sequential_pn; /* needed for CCMP/GCMP */ + u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ +}; + + struct ieee80211_bss { u32 device_ts_beacon, device_ts_presp; @@ -218,15 +236,8 @@ struct ieee80211_rx_data { */ int security_idx; - union { - struct { - u32 iv32; - u16 iv16; - } tkip; - struct { - u8 pn[IEEE80211_CCMP_PN_LEN]; - } ccm_gcm; - }; + u32 tkip_iv32; + u16 tkip_iv16; }; struct ieee80211_csa_settings { @@ -824,7 +835,9 @@ struct ieee80211_sub_if_data { char name[IFNAMSIZ]; - struct ieee80211_fragment_cache frags; + /* Fragment table for host-based reassembly */ + struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; + unsigned int fragment_next; /* TID bitmap for NoAck policy */ u16 noack_map; @@ -1015,7 +1028,6 @@ enum queue_stop_reason { IEEE80211_QUEUE_STOP_REASON_FLUSH, IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN, IEEE80211_QUEUE_STOP_REASON_RESERVE_TID, - IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE, IEEE80211_QUEUE_STOP_REASONS, }; @@ -2064,7 +2076,4 @@ extern const struct ethtool_ops ieee80211_ethtool_ops; #define debug_noinline #endif -void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache); -void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache); - #endif /* 
IEEE80211_I_H */ diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index dceaad91c1e0..874644deb142 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1083,12 +1083,16 @@ static void ieee80211_set_multicast_list(struct net_device *dev) */ static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata) { + int i; + /* free extra data */ ieee80211_free_keys(sdata, false); ieee80211_debugfs_remove_netdev(sdata); - ieee80211_destroy_frag_cache(&sdata->frags); + for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) + __skb_queue_purge(&sdata->fragments[i].skb_list); + sdata->fragment_next = 0; if (ieee80211_vif_is_mesh(&sdata->vif)) mesh_rmc_free(sdata); @@ -1504,10 +1508,6 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, if (ret) return ret; - ieee80211_stop_vif_queues(local, sdata, - IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); - synchronize_net(); - ieee80211_do_stop(sdata, false); ieee80211_teardown_sdata(sdata); @@ -1528,8 +1528,6 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, err = ieee80211_do_open(&sdata->wdev, false); WARN(err, "type change: do_open returned %d", err); - ieee80211_wake_vif_queues(local, sdata, - IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE); return ret; } @@ -1784,7 +1782,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, sdata->wdev.wiphy = local->hw.wiphy; sdata->local = local; - ieee80211_init_frag_cache(&sdata->frags); + for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) + skb_queue_head_init(&sdata->fragments[i].skb_list); INIT_LIST_HEAD(&sdata->key_list); diff --git a/net/mac80211/key.c b/net/mac80211/key.c index a2050d5776ce..91a4e606edcd 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -646,7 +646,6 @@ int ieee80211_key_link(struct ieee80211_key *key, struct sta_info *sta) { struct ieee80211_local *local = sdata->local; - static atomic_t key_color = ATOMIC_INIT(0); struct ieee80211_key *old_key; int idx = 
key->conf.keyidx; bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; @@ -681,12 +680,6 @@ int ieee80211_key_link(struct ieee80211_key *key, key->sdata = sdata; key->sta = sta; - /* - * Assign a unique ID to every key so we can easily prevent mixed - * key and fragment cache attacks. - */ - key->color = atomic_inc_return(&key_color); - increment_tailroom_need_count(sdata); ieee80211_key_replace(sdata, sta, pairwise, old_key, key); diff --git a/net/mac80211/key.h b/net/mac80211/key.h index 9ac5c00dbe80..9951ef06323e 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h @@ -123,8 +123,6 @@ struct ieee80211_key { } debugfs; #endif - unsigned int color; - /* * key config, must be last because it contains key * material as variable length member diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 3752e43ef41b..7919cd7fadd2 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -889,19 +889,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) continue; if (!dflt_chandef.chan) { - /* - * Assign the first enabled channel to dflt_chandef - * from the list of channels - */ - for (i = 0; i < sband->n_channels; i++) - if (!(sband->channels[i].flags & - IEEE80211_CHAN_DISABLED)) - break; - /* if none found then use the first anyway */ - if (i == sband->n_channels) - i = 0; cfg80211_chandef_create(&dflt_chandef, - &sband->channels[i], + &sband->channels[0], NL80211_CHAN_NO_HT); /* init channel we're on */ if (!local->use_chanctx && !local->_oper_chandef.chan) { @@ -1019,11 +1008,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (local->hw.wiphy->max_scan_ie_len) local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len; - if (WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes, - local->hw.n_cipher_schemes))) { - result = -EINVAL; - goto fail_workqueue; - } + WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes, + local->hw.n_cipher_schemes)); result = ieee80211_init_cipher_suites(local); if (result < 0) diff --git 
a/net/mac80211/mlme.c b/net/mac80211/mlme.c index bae631cf549a..174486a6912d 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1133,11 +1133,6 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata) sdata->vif.csa_active = false; ifmgd->csa_waiting_bcn = false; - /* - * If the CSA IE is still present on the beacon after the switch, - * we need to consider it as a new CSA (possibly to self). - */ - ifmgd->beacon_crc_valid = false; ret = drv_post_channel_switch(sdata); if (ret) { @@ -4431,7 +4426,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, if (new_sta) { u32 rates = 0, basic_rates = 0; - bool have_higher_than_11mbit = false; + bool have_higher_than_11mbit; int min_rate = INT_MAX, min_rate_index = -1; struct ieee80211_chanctx_conf *chanctx_conf; const struct cfg80211_bss_ies *ies; diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index d9756da625f0..694faf6ab574 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -890,8 +890,7 @@ int rate_control_set_rates(struct ieee80211_hw *hw, if (old) kfree_rcu(old, rcu_head); - if (sta->uploaded) - drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); + drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta); return 0; } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index ba8d8b7a7a0b..1618c96674a4 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1739,34 +1739,19 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) return result; } -void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(cache->entries); i++) - skb_queue_head_init(&cache->entries[i].skb_list); -} - -void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(cache->entries); i++) - __skb_queue_purge(&cache->entries[i].skb_list); -} - static inline struct ieee80211_fragment_entry * -ieee80211_reassemble_add(struct 
ieee80211_fragment_cache *cache, +ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, unsigned int frag, unsigned int seq, int rx_queue, struct sk_buff **skb) { struct ieee80211_fragment_entry *entry; - entry = &cache->entries[cache->next++]; - if (cache->next >= IEEE80211_FRAGMENT_MAX) - cache->next = 0; + entry = &sdata->fragments[sdata->fragment_next++]; + if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) + sdata->fragment_next = 0; - __skb_queue_purge(&entry->skb_list); + if (!skb_queue_empty(&entry->skb_list)) + __skb_queue_purge(&entry->skb_list); __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ *skb = NULL; @@ -1781,14 +1766,14 @@ ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache, } static inline struct ieee80211_fragment_entry * -ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache, +ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, unsigned int frag, unsigned int seq, int rx_queue, struct ieee80211_hdr *hdr) { struct ieee80211_fragment_entry *entry; int i, idx; - idx = cache->next; + idx = sdata->fragment_next; for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { struct ieee80211_hdr *f_hdr; @@ -1796,7 +1781,7 @@ ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache, if (idx < 0) idx = IEEE80211_FRAGMENT_MAX - 1; - entry = &cache->entries[idx]; + entry = &sdata->fragments[idx]; if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || entry->rx_queue != rx_queue || entry->last_frag + 1 != frag) @@ -1823,27 +1808,16 @@ ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache, return NULL; } -static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc) -{ - return rx->key && - (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || - rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || - rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || - rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && - ieee80211_has_protected(fc); -} - static ieee80211_rx_result 
debug_noinline ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) { - struct ieee80211_fragment_cache *cache = &rx->sdata->frags; struct ieee80211_hdr *hdr; u16 sc; __le16 fc; unsigned int frag, seq; struct ieee80211_fragment_entry *entry; struct sk_buff *skb; - struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + struct ieee80211_rx_status *status; hdr = (struct ieee80211_hdr *)rx->skb->data; fc = hdr->frame_control; @@ -1854,15 +1828,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) sc = le16_to_cpu(hdr->seq_ctrl); frag = sc & IEEE80211_SCTL_FRAG; - if (rx->sta) - cache = &rx->sta->frags; + if (is_multicast_ether_addr(hdr->addr1)) { + I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount); + goto out_no_led; + } if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) goto out; - if (is_multicast_ether_addr(hdr->addr1)) - return RX_DROP_MONITOR; - I802_DEBUG_INC(rx->local->rx_handlers_fragments); if (skb_linearize(rx->skb)) @@ -1878,17 +1851,20 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) if (frag == 0) { /* This is the first fragment of a new frame. */ - entry = ieee80211_reassemble_add(cache, frag, seq, + entry = ieee80211_reassemble_add(rx->sdata, frag, seq, rx->seqno_idx, &(rx->skb)); - if (requires_sequential_pn(rx, fc)) { + if (rx->key && + (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || + rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || + rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || + rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && + ieee80211_has_protected(fc)) { int queue = rx->security_idx; /* Store CCMP/GCMP PN so that we can verify that the * next fragment has a sequential PN value. 
*/ entry->check_sequential_pn = true; - entry->is_protected = true; - entry->key_color = rx->key->color; memcpy(entry->last_pn, rx->key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN); @@ -1900,11 +1876,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) sizeof(rx->key->u.gcmp.rx_pn[queue])); BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN); - } else if (rx->key && - (ieee80211_has_protected(fc) || - (status->flag & RX_FLAG_DECRYPTED))) { - entry->is_protected = true; - entry->key_color = rx->key->color; } return RX_QUEUED; } @@ -1912,7 +1883,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) /* This is a fragment for a frame that should already be pending in * fragment cache. Add this fragment to the end of the pending entry. */ - entry = ieee80211_reassemble_find(cache, frag, seq, + entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->seqno_idx, hdr); if (!entry) { I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); @@ -1927,39 +1898,25 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) if (entry->check_sequential_pn) { int i; u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; + int queue; - if (!requires_sequential_pn(rx, fc)) - return RX_DROP_UNUSABLE; - - /* Prevent mixed key and fragment cache attacks */ - if (entry->key_color != rx->key->color) + if (!rx->key || + (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP && + rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 && + rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP && + rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256)) return RX_DROP_UNUSABLE; - memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { pn[i]++; if (pn[i]) break; } - - rpn = rx->ccm_gcm.pn; + queue = rx->security_idx; + rpn = rx->key->u.ccmp.rx_pn[queue]; if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) return RX_DROP_UNUSABLE; memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); - } else if (entry->is_protected && - (!rx->key || - (!ieee80211_has_protected(fc) && - 
!(status->flag & RX_FLAG_DECRYPTED)) || - rx->key->color != entry->key_color)) { - /* Drop this as a mixed key or fragment cache attack, even - * if for TKIP Michael MIC should protect us, and WEP is a - * lost cause anyway. - */ - return RX_DROP_UNUSABLE; - } else if (entry->is_protected && rx->key && - entry->key_color != rx->key->color && - (status->flag & RX_FLAG_DECRYPTED)) { - return RX_DROP_UNUSABLE; } skb_pull(rx->skb, ieee80211_hdrlen(fc)); @@ -1991,6 +1948,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) out: ieee80211_led_rx(rx->local); + out_no_led: if (rx->sta) rx->sta->rx_stats.packets++; return RX_CONTINUE; @@ -2147,13 +2105,13 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; /* - * Allow EAPOL frames to us/the PAE group address regardless of - * whether the frame was encrypted or not, and always disallow - * all other destination addresses for them. + * Allow EAPOL frames to us/the PAE group address regardless + * of whether the frame was encrypted or not. 
*/ - if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol)) - return ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || - ether_addr_equal(ehdr->h_dest, pae_group_addr); + if (ehdr->h_proto == rx->sdata->control_port_protocol && + (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || + ether_addr_equal(ehdr->h_dest, pae_group_addr))) + return true; if (ieee80211_802_1x_port_control(rx) || ieee80211_drop_unencrypted(rx, fc)) @@ -2182,7 +2140,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) if ((sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && - ehdr->h_proto != rx->sdata->control_port_protocol && (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { if (is_multicast_ether_addr(ehdr->h_dest)) { /* @@ -2235,30 +2192,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) #endif if (skb) { - struct ethhdr *ehdr = (struct ethhdr *)skb->data; - /* deliver to local stack */ skb->protocol = eth_type_trans(skb, dev); memset(skb->cb, 0, sizeof(skb->cb)); - - /* - * 802.1X over 802.11 requires that the authenticator address - * be used for EAPOL frames. However, 802.1X allows the use of - * the PAE group address instead. If the interface is part of - * a bridge and we pass the frame with the PAE group address, - * then the bridge will forward it to the network (even if the - * client was not associated yet), which isn't supposed to - * happen. - * To avoid that, rewrite the destination address to our own - * address, so that the authenticator (e.g. hostapd) will see - * the frame, but bridge won't forward it anywhere else. Note - * that due to earlier filtering, the only other address can - * be the PAE group address. 
- */ - if (unlikely(skb->protocol == sdata->control_port_protocol && - !ether_addr_equal(ehdr->h_dest, sdata->vif.addr))) - ether_addr_copy(ehdr->h_dest, sdata->vif.addr); - if (rx->napi) napi_gro_receive(rx->napi, skb); else @@ -2322,23 +2258,6 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) if (skb_linearize(skb)) return RX_DROP_UNUSABLE; - if (rx->key) { - /* - * We should not receive A-MSDUs on pre-HT connections, - * and HT connections cannot use old ciphers. Thus drop - * them, as in those cases we couldn't even have SPP - * A-MSDUs or such. - */ - switch (rx->key->conf.cipher) { - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - case WLAN_CIPHER_SUITE_TKIP: - return RX_DROP_UNUSABLE; - default: - break; - } - } - ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, rx->sdata->vif.type, rx->local->hw.extra_tx_headroom, true); @@ -2437,13 +2356,13 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) ether_addr_equal(sdata->vif.addr, hdr->addr3)) return RX_CONTINUE; - ac = ieee802_1d_to_ac[skb->priority]; + ac = ieee80211_select_queue_80211(sdata, skb, hdr); q = sdata->vif.hw_queue[ac]; if (ieee80211_queue_stopped(&local->hw, q)) { IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); return RX_DROP_MONITOR; } - skb_set_queue_mapping(skb, ac); + skb_set_queue_mapping(skb, q); if (!--mesh_hdr->ttl) { if (!is_multicast_ether_addr(hdr->addr1)) @@ -3448,8 +3367,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) if (!bssid) return false; if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || - ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) || - !is_valid_ether_addr(hdr->addr2)) + ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) return false; if (ieee80211_is_beacon(hdr->frame_control)) return true; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 5f2c8aeb9bd3..c4b192e7f48f 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -356,8 +356,6 @@ struct sta_info 
*sta_info_alloc(struct ieee80211_sub_if_data *sdata, sta->sdata = sdata; sta->rx_stats.last_rx = jiffies; - ieee80211_init_frag_cache(&sta->frags); - sta->sta_state = IEEE80211_STA_NONE; /* Mark TID as unreserved */ @@ -977,8 +975,6 @@ static void __sta_info_destroy_part2(struct sta_info *sta) ieee80211_sta_debugfs_remove(sta); ieee80211_recalc_min_chandef(sdata); - ieee80211_destroy_frag_cache(&sta->frags); - cleanup_single_sta(sta); } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index b2e5928b1f7b..15b0150283b6 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -324,34 +324,6 @@ struct mesh_sta { DECLARE_EWMA(signal, 1024, 8) -/* - * IEEE 802.11-2016 (10.6 "Defragmentation") recommends support for "concurrent - * reception of at least one MSDU per access category per associated STA" - * on APs, or "at least one MSDU per access category" on other interface types. - * - * This limit can be increased by changing this define, at the cost of slower - * frame reassembly and increased memory use while fragments are pending. - */ -#define IEEE80211_FRAGMENT_MAX 4 - -struct ieee80211_fragment_entry { - struct sk_buff_head skb_list; - unsigned long first_frag_time; - u16 seq; - u16 extra_len; - u16 last_frag; - u8 rx_queue; - u8 check_sequential_pn:1, /* needed for CCMP/GCMP */ - is_protected:1; - u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ - unsigned int key_color; -}; - -struct ieee80211_fragment_cache { - struct ieee80211_fragment_entry entries[IEEE80211_FRAGMENT_MAX]; - unsigned int next; -}; - /** * struct sta_info - STA information * @@ -412,7 +384,6 @@ struct ieee80211_fragment_cache { * @tx_stats: TX statistics * @rx_stats: RX statistics * @status_stats: TX status statistics - * @frags: fragment cache */ struct sta_info { /* General information, mostly static */ @@ -522,8 +493,6 @@ struct sta_info { struct cfg80211_chan_def tdls_chandef; - struct ieee80211_fragment_cache frags; - /* keep last! 
*/ struct ieee80211_sta sta; }; diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 3884bb1a59dd..cb439e06919f 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -161,8 +161,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) update_iv: /* update IV in key information to be able to detect replays */ - rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip.iv32; - rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip.iv16; + rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip_iv32; + rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip_iv16; return RX_CONTINUE; @@ -292,8 +292,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) key, skb->data + hdrlen, skb->len - hdrlen, rx->sta->sta.addr, hdr->addr1, hwaccel, rx->security_idx, - &rx->tkip.iv32, - &rx->tkip.iv16); + &rx->tkip_iv32, + &rx->tkip_iv16); if (res != TKIP_DECRYPT_OK) return RX_DROP_UNUSABLE; @@ -519,9 +519,6 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx, return RX_DROP_UNUSABLE; } - /* reload hdr - skb might have been reallocated */ - hdr = (void *)rx->skb->data; - data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len; if (!rx->sta || data_len < 0) return RX_DROP_UNUSABLE; @@ -556,8 +553,6 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx, } memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN); - if (unlikely(ieee80211_is_frag(hdr))) - memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN); } /* Remove CCMP header and MIC */ @@ -754,9 +749,6 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx) return RX_DROP_UNUSABLE; } - /* reload hdr - skb might have been reallocated */ - hdr = (void *)rx->skb->data; - data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len; if (!rx->sta || data_len < 0) return RX_DROP_UNUSABLE; @@ -792,8 +784,6 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx) } memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN); - if (unlikely(ieee80211_is_frag(hdr))) - 
memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN); } /* Remove GCMP header and MIC */ diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c index 55ed8a97b33f..a13d02b7cee4 100644 --- a/net/mac802154/llsec.c +++ b/net/mac802154/llsec.c @@ -158,7 +158,7 @@ err_tfm0: crypto_free_blkcipher(key->tfm0); err_tfm: for (i = 0; i < ARRAY_SIZE(key->tfm); i++) - if (!IS_ERR_OR_NULL(key->tfm[i])) + if (key->tfm[i]) crypto_free_aead(key->tfm[i]); kzfree(key); diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 4bc3eb0d8ff9..f83c255d7da2 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -71,7 +71,7 @@ config NF_CONNTRACK_MARK config NF_CONNTRACK_SECMARK bool 'Connection tracking security mark support' depends on NETWORK_SECMARK - default y if NETFILTER_ADVANCED=n + default m if NETFILTER_ADVANCED=n help This option enables security markings to be applied to connections. Typically they are copied to connections from diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index 7b69d1ad8f3e..e5336ab36d67 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h @@ -102,17 +102,31 @@ htable_size(u8 hbits) { size_t hsize; - /* We must fit both into u32 in jhash and INT_MAX in kvmalloc_node() */ + /* We must fit both into u32 in jhash and size_t */ if (hbits > 31) return 0; hsize = jhash_size(hbits); - if ((INT_MAX - sizeof(struct htable)) / sizeof(struct hbucket *) + if ((((size_t)-1) - sizeof(struct htable)) / sizeof(struct hbucket *) < hsize) return 0; return hsize * sizeof(struct hbucket *) + sizeof(struct htable); } +/* Compute htable_bits from the user input parameter hashsize */ +static u8 +htable_bits(u32 hashsize) +{ + /* Assume that hashsize == 2^htable_bits */ + u8 bits = fls(hashsize - 1); + + if (jhash_size(bits) != hashsize) + /* Round up to the first 2^n value */ + bits = fls(hashsize); + + return bits; +} + #ifdef IP_SET_HASH_WITH_NETS #if IPSET_NET_COUNT > 1 
#define __CIDR(cidr, i) (cidr[i]) @@ -1295,11 +1309,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, get_random_bytes(&h->initval, sizeof(h->initval)); set->timeout = IPSET_NO_TIMEOUT; - /* Compute htable_bits from the user input parameter hashsize. - * Assume that hashsize == 2^htable_bits, - * otherwise round up to the first 2^n value. - */ - hbits = fls(hashsize - 1); + hbits = htable_bits(hashsize); hsize = htable_size(hbits); if (hsize == 0) { kfree(h); diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index de196dd95dcd..85ca189bdc3d 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -1368,10 +1368,6 @@ int __init ip_vs_conn_init(void) int idx; /* Compute size and mask */ - if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 20) { - pr_info("conn_tab_bits not in [8, 20]. Using default value\n"); - ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS; - } ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits; ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 7065410b13ad..b45a60c22fab 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1226,7 +1226,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, ip_vs_addr_copy(svc->af, &svc->addr, &u->addr); svc->port = u->port; svc->fwmark = u->fwmark; - svc->flags = u->flags & ~IP_VS_SVC_F_HASHED; + svc->flags = u->flags; svc->timeout = u->timeout * HZ; svc->netmask = u->netmask; svc->ipvs = ipvs; @@ -3922,11 +3922,6 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode; tbl[idx++].data = &ipvs->sysctl_schedule_icmp; tbl[idx++].data = &ipvs->sysctl_ignore_tunneled; -#ifdef CONFIG_IP_VS_DEBUG - /* Global sysctls must be ro in non-init netns */ - if (!net_eq(net, &init_net)) - tbl[idx++].mode = 0444; -#endif ipvs->sysctl_hdr = 
register_net_sysctl(net, "net/ipv4/vs", tbl); if (ipvs->sysctl_hdr == NULL) { diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c index ac57e47aded2..7d7466dbf663 100644 --- a/net/netfilter/nf_nat_proto_common.c +++ b/net/netfilter/nf_nat_proto_common.c @@ -38,7 +38,8 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, struct nf_conntrack_tuple *tuple, const struct nf_nat_range *range, enum nf_nat_manip_type maniptype, - const struct nf_conn *ct) + const struct nf_conn *ct, + u16 *rover) { unsigned int range_size, min, max, i; __be16 *portptr; @@ -83,13 +84,15 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, } else if (range->flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY) { off = prandom_u32(); } else { - off = prandom_u32(); + off = *rover; } for (i = 0; ; ++off) { *portptr = htons(min + off % range_size); if (++i != range_size && nf_nat_used_tuple(tuple, ct)) continue; + if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) + *rover = off; return; } } diff --git a/net/netfilter/nf_nat_proto_dccp.c b/net/netfilter/nf_nat_proto_dccp.c index e7d27c083393..15c47b246d0d 100644 --- a/net/netfilter/nf_nat_proto_dccp.c +++ b/net/netfilter/nf_nat_proto_dccp.c @@ -20,6 +20,8 @@ #include #include +static u_int16_t dccp_port_rover; + static void dccp_unique_tuple(const struct nf_nat_l3proto *l3proto, struct nf_conntrack_tuple *tuple, @@ -27,7 +29,8 @@ dccp_unique_tuple(const struct nf_nat_l3proto *l3proto, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { - nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct); + nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct, + &dccp_port_rover); } static bool diff --git a/net/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c index b839373716e8..cbc7ade1487b 100644 --- a/net/netfilter/nf_nat_proto_sctp.c +++ b/net/netfilter/nf_nat_proto_sctp.c @@ -14,6 +14,8 @@ #include +static u_int16_t nf_sctp_port_rover; + 
static void sctp_unique_tuple(const struct nf_nat_l3proto *l3proto, struct nf_conntrack_tuple *tuple, @@ -21,7 +23,8 @@ sctp_unique_tuple(const struct nf_nat_l3proto *l3proto, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { - nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct); + nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct, + &nf_sctp_port_rover); } static bool diff --git a/net/netfilter/nf_nat_proto_tcp.c b/net/netfilter/nf_nat_proto_tcp.c index 882e79c6df73..4f8820fc5148 100644 --- a/net/netfilter/nf_nat_proto_tcp.c +++ b/net/netfilter/nf_nat_proto_tcp.c @@ -18,6 +18,8 @@ #include #include +static u16 tcp_port_rover; + static void tcp_unique_tuple(const struct nf_nat_l3proto *l3proto, struct nf_conntrack_tuple *tuple, @@ -25,7 +27,8 @@ tcp_unique_tuple(const struct nf_nat_l3proto *l3proto, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { - nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct); + nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct, + &tcp_port_rover); } static bool diff --git a/net/netfilter/nf_nat_proto_udp.c b/net/netfilter/nf_nat_proto_udp.c index ed91bdd8857c..b1e627227b6e 100644 --- a/net/netfilter/nf_nat_proto_udp.c +++ b/net/netfilter/nf_nat_proto_udp.c @@ -17,6 +17,8 @@ #include #include +static u16 udp_port_rover; + static void udp_unique_tuple(const struct nf_nat_l3proto *l3proto, struct nf_conntrack_tuple *tuple, @@ -24,7 +26,8 @@ udp_unique_tuple(const struct nf_nat_l3proto *l3proto, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { - nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct); + nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct, + &udp_port_rover); } static bool diff --git a/net/netfilter/nf_nat_proto_udplite.c b/net/netfilter/nf_nat_proto_udplite.c index 8be265378de9..58340c97bd83 100644 --- a/net/netfilter/nf_nat_proto_udplite.c +++ b/net/netfilter/nf_nat_proto_udplite.c @@ -17,6 +17,8 @@ #include 
#include +static u16 udplite_port_rover; + static void udplite_unique_tuple(const struct nf_nat_l3proto *l3proto, struct nf_conntrack_tuple *tuple, @@ -24,7 +26,8 @@ udplite_unique_tuple(const struct nf_nat_l3proto *l3proto, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { - nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct); + nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct, + &udplite_port_rover); } static bool diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index dccd5520293f..b19ad20a705c 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -44,15 +44,6 @@ void nf_unregister_queue_handler(struct net *net) } EXPORT_SYMBOL(nf_unregister_queue_handler); -static void nf_queue_sock_put(struct sock *sk) -{ -#ifdef CONFIG_INET - sock_gen_put(sk); -#else - sock_put(sk); -#endif -} - void nf_queue_entry_release_refs(struct nf_queue_entry *entry) { struct nf_hook_state *state = &entry->state; @@ -63,7 +54,7 @@ void nf_queue_entry_release_refs(struct nf_queue_entry *entry) if (state->out) dev_put(state->out); if (state->sk) - nf_queue_sock_put(state->sk); + sock_put(state->sk); #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (entry->skb->nf_bridge) { struct net_device *physdev; @@ -80,13 +71,10 @@ void nf_queue_entry_release_refs(struct nf_queue_entry *entry) EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs); /* Bump dev refs so they don't vanish while packet is out */ -bool nf_queue_entry_get_refs(struct nf_queue_entry *entry) +void nf_queue_entry_get_refs(struct nf_queue_entry *entry) { struct nf_hook_state *state = &entry->state; - if (state->sk && !atomic_inc_not_zero(&state->sk->sk_refcnt)) - return false; - if (state->in) dev_hold(state->in); if (state->out) @@ -105,7 +93,6 @@ bool nf_queue_entry_get_refs(struct nf_queue_entry *entry) dev_hold(physdev); } #endif - return true; } EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs); @@ -159,11 +146,7 @@ int nf_queue(struct sk_buff *skb, .size = 
sizeof(*entry) + afinfo->route_key_size, }; - if (!nf_queue_entry_get_refs(entry)) { - kfree(entry); - return -ENOTCONN; - } - + nf_queue_entry_get_refs(entry); skb_dst_force(skb); afinfo->saveroute(skb, entry); status = qh->outfn(entry, queuenum); diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index 8be604eb6961..c8a4a48bced9 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -34,9 +34,6 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, int length = (th->doff * 4) - sizeof(*th); u8 buf[40], *ptr; - if (unlikely(length < 0)) - return false; - ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf); if (ptr == NULL) return false; @@ -53,8 +50,6 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff, length--; continue; default: - if (length < 2) - return true; opsize = *ptr++; if (opsize < 2) return true; diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index 63a9d5fd00c0..8c1733869343 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c @@ -355,14 +355,10 @@ static int nfnl_cthelper_update(const struct nlattr * const tb[], struct nf_conntrack_helper *helper) { - u32 size; int ret; - if (tb[NFCTH_PRIV_DATA_LEN]) { - size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); - if (size != helper->data_len) - return -EBUSY; - } + if (tb[NFCTH_PRIV_DATA_LEN]) + return -EBUSY; if (tb[NFCTH_POLICY]) { ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]); diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 33142140f204..54cde78c2718 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -486,7 +486,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, goto nla_put_failure; if (indev && entskb->dev && - skb_mac_header_was_set(entskb)) { + entskb->mac_header != entskb->network_header) { struct 
nfqnl_msg_packet_hw phw; int len; @@ -612,15 +612,9 @@ static struct nf_queue_entry * nf_queue_entry_dup(struct nf_queue_entry *e) { struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC); - - if (!entry) - return NULL; - - if (nf_queue_entry_get_refs(entry)) - return entry; - - kfree(entry); - return NULL; + if (entry) + nf_queue_entry_get_refs(entry); + return entry; } #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index d6fcfc995420..a6c29c5bbfbd 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -189,10 +189,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx, nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_EXPR, priv->expr->ops->size); if (set->flags & NFT_SET_TIMEOUT) { - if (timeout || set->timeout) { - nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT); + if (timeout || set->timeout) nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION); - } } priv->timeout = timeout; diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index a81f6bf42d1f..ba7aed13e174 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -34,9 +34,6 @@ static void nft_exthdr_eval(const struct nft_expr *expr, unsigned int offset = 0; int err; - if (pkt->skb->protocol != htons(ETH_P_IPV6)) - goto err; - err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); if (err < 0) goto err; diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 182704b980d1..868480b83649 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -157,9 +157,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip6); break; default: - if (tb[NFTA_NAT_REG_ADDR_MIN]) - return -EAFNOSUPPORT; - break; + return -EAFNOSUPPORT; } priv->family = family; diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 2909c34dda7a..cdafbd38a456 100644 --- 
a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -266,66 +266,11 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) } EXPORT_SYMBOL_GPL(xt_request_find_target); - -static int xt_obj_to_user(u16 __user *psize, u16 size, - void __user *pname, const char *name, - u8 __user *prev, u8 rev) -{ - if (put_user(size, psize)) - return -EFAULT; - if (copy_to_user(pname, name, strlen(name) + 1)) - return -EFAULT; - if (put_user(rev, prev)) - return -EFAULT; - - return 0; -} - -#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE) \ - xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size, \ - U->u.user.name, K->u.kernel.TYPE->name, \ - &U->u.user.revision, K->u.kernel.TYPE->revision) - -int xt_data_to_user(void __user *dst, const void *src, - int usersize, int size) -{ - usersize = usersize ? : size; - if (copy_to_user(dst, src, usersize)) - return -EFAULT; - if (usersize != size && clear_user(dst + usersize, size - usersize)) - return -EFAULT; - - return 0; -} -EXPORT_SYMBOL_GPL(xt_data_to_user); - -#define XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \ - xt_data_to_user(U->data, K->data, \ - K->u.kernel.TYPE->usersize, \ - C_SIZE ? 
: K->u.kernel.TYPE->TYPE##size) - -int xt_match_to_user(const struct xt_entry_match *m, - struct xt_entry_match __user *u) -{ - return XT_OBJ_TO_USER(u, m, match, 0) || - XT_DATA_TO_USER(u, m, match, 0); -} -EXPORT_SYMBOL_GPL(xt_match_to_user); - -int xt_target_to_user(const struct xt_entry_target *t, - struct xt_entry_target __user *u) -{ - return XT_OBJ_TO_USER(u, t, target, 0) || - XT_DATA_TO_USER(u, t, target, 0); -} -EXPORT_SYMBOL_GPL(xt_target_to_user); - static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) { const struct xt_match *m; int have_rev = 0; - mutex_lock(&xt[af].mutex); list_for_each_entry(m, &xt[af].match, list) { if (strcmp(m->name, name) == 0) { if (m->revision > *bestp) @@ -334,7 +279,6 @@ static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) have_rev = 1; } } - mutex_unlock(&xt[af].mutex); if (af != NFPROTO_UNSPEC && !have_rev) return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); @@ -347,7 +291,6 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) const struct xt_target *t; int have_rev = 0; - mutex_lock(&xt[af].mutex); list_for_each_entry(t, &xt[af].target, list) { if (strcmp(t->name, name) == 0) { if (t->revision > *bestp) @@ -356,7 +299,6 @@ static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) have_rev = 1; } } - mutex_unlock(&xt[af].mutex); if (af != NFPROTO_UNSPEC && !have_rev) return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); @@ -370,10 +312,12 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target, { int have_rev, best = -1; + mutex_lock(&xt[af].mutex); if (target == 1) have_rev = target_revfn(af, name, revision, &best); else have_rev = match_revfn(af, name, revision, &best); + mutex_unlock(&xt[af].mutex); /* Nothing at all? Return 0 to try loading module. 
*/ if (best == -1) { @@ -622,7 +566,7 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, { const struct xt_match *match = m->u.kernel.match; struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m; - int off = xt_compat_match_offset(match); + int pad, off = xt_compat_match_offset(match); u_int16_t msize = cm->u.user.match_size; char name[sizeof(m->u.user.name)]; @@ -632,6 +576,9 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, match->compat_from_user(m->data, cm->data); else memcpy(m->data, cm->data, msize - sizeof(*cm)); + pad = XT_ALIGN(match->matchsize) - match->matchsize; + if (pad > 0) + memset(m->data + match->matchsize, 0, pad); msize += off; m->u.user.match_size = msize; @@ -977,7 +924,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, { const struct xt_target *target = t->u.kernel.target; struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; - int off = xt_compat_target_offset(target); + int pad, off = xt_compat_target_offset(target); u_int16_t tsize = ct->u.user.target_size; char name[sizeof(t->u.user.name)]; @@ -987,6 +934,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, target->compat_from_user(t->data, ct->data); else memcpy(t->data, ct->data, tsize - sizeof(*ct)); + pad = XT_ALIGN(target->targetsize) - target->targetsize; + if (pad > 0) + memset(t->data + target->targetsize, 0, pad); tsize += off; t->u.user.target_size = tsize; @@ -1194,9 +1144,6 @@ xt_replace_table(struct xt_table *table, smp_wmb(); table->private = newinfo; - /* make sure all cpus see new ->private value */ - smp_mb(); - /* * Even though table entries have now been swapped, other CPU's * may still be using the old entries. 
This is okay, because diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index f2fe60ce286f..febcfac7e3df 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -380,7 +380,6 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = { .name = "CT", .family = NFPROTO_UNSPEC, .targetsize = sizeof(struct xt_ct_target_info), - .usersize = offsetof(struct xt_ct_target_info, ct), .checkentry = xt_ct_tg_check_v0, .destroy = xt_ct_tg_destroy_v0, .target = xt_ct_target_v0, @@ -392,7 +391,6 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = { .family = NFPROTO_UNSPEC, .revision = 1, .targetsize = sizeof(struct xt_ct_target_info_v1), - .usersize = offsetof(struct xt_ct_target_info, ct), .checkentry = xt_ct_tg_check_v1, .destroy = xt_ct_tg_destroy_v1, .target = xt_ct_target_v1, @@ -404,7 +402,6 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = { .family = NFPROTO_UNSPEC, .revision = 2, .targetsize = sizeof(struct xt_ct_target_info_v1), - .usersize = offsetof(struct xt_ct_target_info, ct), .checkentry = xt_ct_tg_check_v2, .destroy = xt_ct_tg_destroy_v1, .target = xt_ct_target_v1, diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c index 18e4fd8aa166..b0f4f1bca61f 100644 --- a/net/netfilter/xt_IDLETIMER.c +++ b/net/netfilter/xt_IDLETIMER.c @@ -505,7 +505,6 @@ static struct xt_target idletimer_tg __read_mostly = { .family = NFPROTO_UNSPEC, .target = idletimer_tg_target, .targetsize = sizeof(struct idletimer_tg_info), - .usersize = offsetof(struct idletimer_tg_info, timer), .checkentry = idletimer_tg_checkentry, .destroy = idletimer_tg_destroy, .me = THIS_MODULE, diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c index 2d1c5c169a26..0858fe17e14a 100644 --- a/net/netfilter/xt_LED.c +++ b/net/netfilter/xt_LED.c @@ -198,7 +198,6 @@ static struct xt_target led_tg_reg __read_mostly = { .family = NFPROTO_UNSPEC, .target = led_tg, .targetsize = sizeof(struct xt_led_info), - .usersize = offsetof(struct xt_led_info, 
internal_data), .checkentry = led_tg_check, .destroy = led_tg_destroy, .me = THIS_MODULE, diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c index 6768d4d2ffd0..0be96f8475f7 100644 --- a/net/netfilter/xt_RATEEST.c +++ b/net/netfilter/xt_RATEEST.c @@ -107,9 +107,6 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par) } cfg; int ret; - if (strnlen(info->name, sizeof(est->name)) >= sizeof(est->name)) - return -ENAMETOOLONG; - if (unlikely(!rnd_inited)) { get_random_bytes(&jhash_rnd, sizeof(jhash_rnd)); rnd_inited = true; @@ -181,7 +178,6 @@ static struct xt_target xt_rateest_tg_reg __read_mostly = { .checkentry = xt_rateest_tg_checkentry, .destroy = xt_rateest_tg_destroy, .targetsize = sizeof(struct xt_rateest_target_info), - .usersize = offsetof(struct xt_rateest_target_info, est), .me = THIS_MODULE, }; diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c index d597b504a82e..3eff7b67cdf2 100644 --- a/net/netfilter/xt_TEE.c +++ b/net/netfilter/xt_TEE.c @@ -127,7 +127,6 @@ static struct xt_target tee_tg_reg[] __read_mostly = { .family = NFPROTO_IPV4, .target = tee_tg4, .targetsize = sizeof(struct xt_tee_tginfo), - .usersize = offsetof(struct xt_tee_tginfo, priv), .checkentry = tee_tg_check, .destroy = tee_tg_destroy, .me = THIS_MODULE, @@ -139,7 +138,6 @@ static struct xt_target tee_tg_reg[] __read_mostly = { .family = NFPROTO_IPV6, .target = tee_tg6, .targetsize = sizeof(struct xt_tee_tginfo), - .usersize = offsetof(struct xt_tee_tginfo, priv), .checkentry = tee_tg_check, .destroy = tee_tg_destroy, .me = THIS_MODULE, diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c index 2e26df862d38..7b993f25aab9 100644 --- a/net/netfilter/xt_bpf.c +++ b/net/netfilter/xt_bpf.c @@ -60,7 +60,6 @@ static struct xt_match bpf_mt_reg __read_mostly = { .match = bpf_mt, .destroy = bpf_mt_destroy, .matchsize = sizeof(struct xt_bpf_info), - .usersize = offsetof(struct xt_bpf_info, filter), .me = THIS_MODULE, }; diff --git 
a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c index aab11f7ab547..99bbc829868d 100644 --- a/net/netfilter/xt_connlimit.c +++ b/net/netfilter/xt_connlimit.c @@ -437,7 +437,6 @@ static struct xt_match connlimit_mt_reg __read_mostly = { .checkentry = connlimit_mt_check, .match = connlimit_mt, .matchsize = sizeof(struct xt_connlimit_info), - .usersize = offsetof(struct xt_connlimit_info, data), .destroy = connlimit_mt_destroy, .me = THIS_MODULE, }; diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index d893cc133de4..7381be0cdcdf 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -726,7 +726,6 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { .family = NFPROTO_IPV4, .match = hashlimit_mt, .matchsize = sizeof(struct xt_hashlimit_mtinfo1), - .usersize = offsetof(struct xt_hashlimit_mtinfo1, hinfo), .checkentry = hashlimit_mt_check, .destroy = hashlimit_mt_destroy, .me = THIS_MODULE, @@ -738,7 +737,6 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = { .family = NFPROTO_IPV6, .match = hashlimit_mt, .matchsize = sizeof(struct xt_hashlimit_mtinfo1), - .usersize = offsetof(struct xt_hashlimit_mtinfo1, hinfo), .checkentry = hashlimit_mt_check, .destroy = hashlimit_mt_destroy, .me = THIS_MODULE, diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index e84de7656289..bef850596558 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c @@ -193,7 +193,6 @@ static struct xt_match limit_mt_reg __read_mostly = { .compat_from_user = limit_mt_compat_from_user, .compat_to_user = limit_mt_compat_to_user, #endif - .usersize = offsetof(struct xt_rateinfo, prev), .me = THIS_MODULE, }; diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c index e9adf6ebca30..3048a7e3a90a 100644 --- a/net/netfilter/xt_nfacct.c +++ b/net/netfilter/xt_nfacct.c @@ -62,7 +62,6 @@ static struct xt_match nfacct_mt_reg __read_mostly = { .match = nfacct_mt, .destroy = nfacct_mt_destroy, 
.matchsize = sizeof(struct xt_nfacct_match_info), - .usersize = offsetof(struct xt_nfacct_match_info, nfacct), .me = THIS_MODULE, }; diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c index 9b7a11a55d4e..e82524b21d07 100644 --- a/net/netfilter/xt_qtaguid.c +++ b/net/netfilter/xt_qtaguid.c @@ -1067,6 +1067,18 @@ static struct sock_tag *get_sock_stat_nl(const struct sock *sk) return sock_tag_tree_search(&sock_tag_tree, sk); } +static struct sock_tag *get_sock_stat(const struct sock *sk) +{ + struct sock_tag *sock_tag_entry; + MT_DEBUG("qtaguid: get_sock_stat(sk=%p)\n", sk); + if (!sk) + return NULL; + spin_lock_bh(&sock_tag_list_lock); + sock_tag_entry = get_sock_stat_nl(sk); + spin_unlock_bh(&sock_tag_list_lock); + return sock_tag_entry; +} + static int ipx_proto(const struct sk_buff *skb, struct xt_action_param *par) { @@ -1298,15 +1310,12 @@ static void if_tag_stat_update(const char *ifname, uid_t uid, * Look for a tagged sock. * It will have an acct_uid. */ - spin_lock_bh(&sock_tag_list_lock); - sock_tag_entry = sk ? get_sock_stat_nl(sk) : NULL; + sock_tag_entry = get_sock_stat(sk); if (sock_tag_entry) { tag = sock_tag_entry->tag; acct_tag = get_atag_from_tag(tag); uid_tag = get_utag_from_tag(tag); - } - spin_unlock_bh(&sock_tag_list_lock); - if (!sock_tag_entry) { + } else { acct_tag = make_atag_from_value(0); tag = combine_atag_with_uid(acct_tag, uid); uid_tag = make_tag_from_uid(uid); @@ -2403,20 +2412,15 @@ int qtaguid_untag(struct socket *el_socket, bool kernel) * At first, we want to catch user-space code that is not * opening the /dev/xt_qtaguid. */ - if (IS_ERR_OR_NULL(pqd_entry)) + if (IS_ERR_OR_NULL(pqd_entry) || !sock_tag_entry->list.next) { pr_warn_once("qtaguid: %s(): " "User space forgot to open /dev/xt_qtaguid? 
" "pid=%u tgid=%u sk_pid=%u, uid=%u\n", __func__, current->pid, current->tgid, sock_tag_entry->pid, from_kuid(&init_user_ns, current_fsuid())); - /* - * This check is needed because tagging from a process that - * didn’t open /dev/xt_qtaguid still adds the sock_tag_entry - * to sock_tag_tree. - */ - if (sock_tag_entry->list.next) + } else { list_del(&sock_tag_entry->list); - + } spin_unlock_bh(&uid_tag_data_tree_lock); /* * We don't free tag_ref from the utd_entry here, diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c index 10d61a6eed71..44c8eb4c9d66 100644 --- a/net/netfilter/xt_quota.c +++ b/net/netfilter/xt_quota.c @@ -73,7 +73,6 @@ static struct xt_match quota_mt_reg __read_mostly = { .checkentry = quota_mt_check, .destroy = quota_mt_destroy, .matchsize = sizeof(struct xt_quota_info), - .usersize = offsetof(struct xt_quota_info, master), .me = THIS_MODULE, }; diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c index 5c1020b69cac..0bb7e22c8174 100644 --- a/net/netfilter/xt_quota2.c +++ b/net/netfilter/xt_quota2.c @@ -141,8 +141,6 @@ static ssize_t quota_proc_write(struct file *file, const char __user *input, if (copy_from_user(buf, input, size) != 0) return -EFAULT; buf[sizeof(buf)-1] = '\0'; - if (size < sizeof(buf)) - buf[size] = '\0'; spin_lock_bh(&e->lock); e->quota = simple_strtoull(buf, NULL, 0); @@ -290,8 +288,6 @@ quota_mt2(const struct sk_buff *skb, struct xt_action_param *par) { struct xt_quota_mtinfo2 *q = (void *)par->matchinfo; struct xt_quota_counter *e = q->master; - int charge = (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; - bool no_change = q->flags & XT_QUOTA_NO_CHANGE; bool ret = q->flags & XT_QUOTA_INVERT; spin_lock_bh(&e->lock); @@ -300,17 +296,20 @@ quota_mt2(const struct sk_buff *skb, struct xt_action_param *par) * While no_change is pointless in "grow" mode, we will * implement it here simply to have a consistent behavior. 
*/ - if (!no_change) - e->quota += charge; - ret = true; /* note: does not respect inversion (bug??) */ + if (!(q->flags & XT_QUOTA_NO_CHANGE)) { + e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; + } + ret = true; } else { - if (e->quota > charge) { - if (!no_change) - e->quota -= charge; + if (e->quota > skb->len) { + if (!(q->flags & XT_QUOTA_NO_CHANGE)) + e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; ret = !ret; - } else if (e->quota) { + } else { /* We are transitioning, log that fact. */ - quota2_log(par->in, par->out, e, q->name); + if (e->quota) { + quota2_log(par->in, par->out, e, q->name); + } /* we do not allow even small packets from now on */ e->quota = 0; } @@ -328,7 +327,6 @@ static struct xt_match quota_mt2_reg[] __read_mostly = { .match = quota_mt2, .destroy = quota_mt2_destroy, .matchsize = sizeof(struct xt_quota_mtinfo2), - .usersize = offsetof(struct xt_quota_mtinfo2, master), .me = THIS_MODULE, }, { @@ -339,7 +337,6 @@ static struct xt_match quota_mt2_reg[] __read_mostly = { .match = quota_mt2, .destroy = quota_mt2_destroy, .matchsize = sizeof(struct xt_quota_mtinfo2), - .usersize = offsetof(struct xt_quota_mtinfo2, master), .me = THIS_MODULE, }, }; diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c index d46dc9ff591f..7720b036d76a 100644 --- a/net/netfilter/xt_rateest.c +++ b/net/netfilter/xt_rateest.c @@ -135,7 +135,6 @@ static struct xt_match xt_rateest_mt_reg __read_mostly = { .checkentry = xt_rateest_mt_checkentry, .destroy = xt_rateest_mt_destroy, .matchsize = sizeof(struct xt_rateest_match_info), - .usersize = offsetof(struct xt_rateest_match_info, est1), .me = THIS_MODULE, }; diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index ffe673c6a248..cd53b861a15c 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -156,8 +156,7 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e) /* * Drop entries with timestamps older then 'time'. 
*/ -static void recent_entry_reap(struct recent_table *t, unsigned long time, - struct recent_entry *working, bool update) +static void recent_entry_reap(struct recent_table *t, unsigned long time) { struct recent_entry *e; @@ -166,12 +165,6 @@ static void recent_entry_reap(struct recent_table *t, unsigned long time, */ e = list_entry(t->lru_list.next, struct recent_entry, lru_list); - /* - * Do not reap the entry which are going to be updated. - */ - if (e == working && update) - return; - /* * The last time stamp is the most recent. */ @@ -314,8 +307,7 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par) /* info->seconds must be non-zero */ if (info->check_set & XT_RECENT_REAP) - recent_entry_reap(t, time, e, - info->check_set & XT_RECENT_UPDATE && ret); + recent_entry_reap(t, time); } if (info->check_set & XT_RECENT_SET || diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c index 8710fdba2ae2..11de55e7a868 100644 --- a/net/netfilter/xt_statistic.c +++ b/net/netfilter/xt_statistic.c @@ -84,7 +84,6 @@ static struct xt_match xt_statistic_mt_reg __read_mostly = { .checkentry = statistic_mt_check, .destroy = statistic_mt_destroy, .matchsize = sizeof(struct xt_statistic_info), - .usersize = offsetof(struct xt_statistic_info, master), .me = THIS_MODULE, }; diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c index 423293ee57c2..0bc3460319c8 100644 --- a/net/netfilter/xt_string.c +++ b/net/netfilter/xt_string.c @@ -77,7 +77,6 @@ static struct xt_match xt_string_mt_reg __read_mostly = { .match = string_mt, .destroy = string_mt_destroy, .matchsize = sizeof(struct xt_string_info), - .usersize = offsetof(struct xt_string_info, config), .me = THIS_MODULE, }; diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c index 422fac2a4a3c..7fd1104ba900 100644 --- a/net/netlabel/netlabel_cipso_v4.c +++ b/net/netlabel/netlabel_cipso_v4.c @@ -163,8 +163,8 @@ static int netlbl_cipsov4_add_std(struct genl_info 
*info, return -ENOMEM; doi_def->map.std = kzalloc(sizeof(*doi_def->map.std), GFP_KERNEL); if (doi_def->map.std == NULL) { - kfree(doi_def); - return -ENOMEM; + ret_val = -ENOMEM; + goto add_std_failure; } doi_def->type = CIPSO_V4_MAP_TRANS; @@ -205,14 +205,14 @@ static int netlbl_cipsov4_add_std(struct genl_info *info, } doi_def->map.std->lvl.local = kcalloc(doi_def->map.std->lvl.local_size, sizeof(u32), - GFP_KERNEL | __GFP_NOWARN); + GFP_KERNEL); if (doi_def->map.std->lvl.local == NULL) { ret_val = -ENOMEM; goto add_std_failure; } doi_def->map.std->lvl.cipso = kcalloc(doi_def->map.std->lvl.cipso_size, sizeof(u32), - GFP_KERNEL | __GFP_NOWARN); + GFP_KERNEL); if (doi_def->map.std->lvl.cipso == NULL) { ret_val = -ENOMEM; goto add_std_failure; @@ -279,7 +279,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info, doi_def->map.std->cat.local = kcalloc( doi_def->map.std->cat.local_size, sizeof(u32), - GFP_KERNEL | __GFP_NOWARN); + GFP_KERNEL); if (doi_def->map.std->cat.local == NULL) { ret_val = -ENOMEM; goto add_std_failure; @@ -287,7 +287,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info, doi_def->map.std->cat.cipso = kcalloc( doi_def->map.std->cat.cipso_size, sizeof(u32), - GFP_KERNEL | __GFP_NOWARN); + GFP_KERNEL); if (doi_def->map.std->cat.cipso == NULL) { ret_val = -ENOMEM; goto add_std_failure; diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c index 5f1218dc9162..13f777f20995 100644 --- a/net/netlabel/netlabel_mgmt.c +++ b/net/netlabel/netlabel_mgmt.c @@ -92,7 +92,6 @@ static const struct nla_policy netlbl_mgmt_genl_policy[NLBL_MGMT_A_MAX + 1] = { static int netlbl_mgmt_add_common(struct genl_info *info, struct netlbl_audit *audit_info) { - void *pmap = NULL; int ret_val = -EINVAL; struct netlbl_domaddr_map *addrmap = NULL; struct cipso_v4_doi *cipsov4 = NULL; @@ -166,7 +165,6 @@ static int netlbl_mgmt_add_common(struct genl_info *info, ret_val = -ENOMEM; goto add_free_addrmap; } - pmap = map; map->list.addr = 
addr->s_addr & mask->s_addr; map->list.mask = mask->s_addr; map->list.valid = 1; @@ -175,8 +173,10 @@ static int netlbl_mgmt_add_common(struct genl_info *info, map->def.cipso = cipsov4; ret_val = netlbl_af4list_add(&map->list, &addrmap->list4); - if (ret_val != 0) - goto add_free_map; + if (ret_val != 0) { + kfree(map); + goto add_free_addrmap; + } entry->def.type = NETLBL_NLTYPE_ADDRSELECT; entry->def.addrsel = addrmap; @@ -212,7 +212,6 @@ static int netlbl_mgmt_add_common(struct genl_info *info, ret_val = -ENOMEM; goto add_free_addrmap; } - pmap = map; map->list.addr = *addr; map->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; map->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; @@ -223,8 +222,10 @@ static int netlbl_mgmt_add_common(struct genl_info *info, map->def.type = entry->def.type; ret_val = netlbl_af6list_add(&map->list, &addrmap->list6); - if (ret_val != 0) - goto add_free_map; + if (ret_val != 0) { + kfree(map); + goto add_free_addrmap; + } entry->def.type = NETLBL_NLTYPE_ADDRSELECT; entry->def.addrsel = addrmap; @@ -233,12 +234,10 @@ static int netlbl_mgmt_add_common(struct genl_info *info, ret_val = netlbl_domhsh_add(entry, audit_info); if (ret_val != 0) - goto add_free_map; + goto add_free_addrmap; return 0; -add_free_map: - kfree(pmap); add_free_addrmap: kfree(addrmap); add_doi_put_def: diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 98d8453eee27..f4c33b2858e4 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -419,13 +419,11 @@ void netlink_table_ungrab(void) static inline void netlink_lock_table(void) { - unsigned long flags; - /* read_lock() synchronizes us to netlink_table_grab */ - read_lock_irqsave(&nl_table_lock, flags); + read_lock(&nl_table_lock); atomic_inc(&nl_table_users); - read_unlock_irqrestore(&nl_table_lock, flags); + read_unlock(&nl_table_lock); } static inline void @@ -558,10 +556,7 @@ static int netlink_insert(struct sock *sk, u32 portid) /* We need to ensure that the socket is hashed and 
visible. */ smp_wmb(); - /* Paired with lockless reads from netlink_bind(), - * netlink_connect() and netlink_sendmsg(). - */ - WRITE_ONCE(nlk_sk(sk)->bound, portid); + nlk_sk(sk)->bound = portid; err: release_sock(sk); @@ -976,8 +971,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, else if (nlk->ngroups < 8*sizeof(groups)) groups &= (1UL << nlk->ngroups) - 1; - /* Paired with WRITE_ONCE() in netlink_insert() */ - bound = READ_ONCE(nlk->bound); + bound = nlk->bound; if (bound) { /* Ensure nlk->portid is up-to-date. */ smp_rmb(); @@ -1057,9 +1051,8 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, /* No need for barriers here as we return to user-space without * using any of the bound attributes. - * Paired with WRITE_ONCE() in netlink_insert(). */ - if (!READ_ONCE(nlk->bound)) + if (!nlk->bound) err = netlink_autobind(sock); if (err == 0) { @@ -1784,11 +1777,6 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) if (msg->msg_flags&MSG_OOB) return -EOPNOTSUPP; - if (len == 0) { - pr_warn_once("Zero length message leads to an empty skb\n"); - return -ENODATA; - } - err = scm_send(sock, msg, &scm, true); if (err < 0) return err; @@ -1811,8 +1799,7 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) dst_group = nlk->dst_group; } - /* Paired with WRITE_ONCE() in netlink_insert() */ - if (!READ_ONCE(nlk->bound)) { + if (!nlk->bound) { err = netlink_autobind(sock); if (err) goto out; @@ -2396,15 +2383,13 @@ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid, /* errors reported via destination sk->sk_err, but propagate * delivery errors if NETLINK_BROADCAST_ERROR flag is set */ err = nlmsg_multicast(sk, skb, exclude_portid, group, flags); - if (err == -ESRCH) - err = 0; } if (report) { int err2; err2 = nlmsg_unicast(sk, skb, portid); - if (!err) + if (!err || err == -ESRCH) err = err2; } diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c index 
d1a0b7056743..f0ecaec1ff3d 100644 --- a/net/netrom/nr_timer.c +++ b/net/netrom/nr_timer.c @@ -125,9 +125,11 @@ static void nr_heartbeat_expiry(unsigned long param) is accepted() it isn't 'dead' so doesn't get removed. */ if (sock_flag(sk, SOCK_DESTROY) || (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { + sock_hold(sk); bh_unlock_sock(sk); nr_destroy_socket(sk); - goto out; + sock_put(sk); + return; } break; @@ -148,8 +150,6 @@ static void nr_heartbeat_expiry(unsigned long param) nr_start_heartbeat(sk); bh_unlock_sock(sk); -out: - sock_put(sk); } static void nr_t2timer_expiry(unsigned long param) @@ -163,7 +163,6 @@ static void nr_t2timer_expiry(unsigned long param) nr_enquiry_response(sk); } bh_unlock_sock(sk); - sock_put(sk); } static void nr_t4timer_expiry(unsigned long param) @@ -173,7 +172,6 @@ static void nr_t4timer_expiry(unsigned long param) bh_lock_sock(sk); nr_sk(sk)->condition &= ~NR_COND_PEER_RX_BUSY; bh_unlock_sock(sk); - sock_put(sk); } static void nr_idletimer_expiry(unsigned long param) @@ -202,7 +200,6 @@ static void nr_idletimer_expiry(unsigned long param) sock_set_flag(sk, SOCK_DEAD); } bh_unlock_sock(sk); - sock_put(sk); } static void nr_t1timer_expiry(unsigned long param) @@ -215,7 +212,8 @@ static void nr_t1timer_expiry(unsigned long param) case NR_STATE_1: if (nr->n2count == nr->n2) { nr_disconnect(sk, ETIMEDOUT); - goto out; + bh_unlock_sock(sk); + return; } else { nr->n2count++; nr_write_internal(sk, NR_CONNREQ); @@ -225,7 +223,8 @@ static void nr_t1timer_expiry(unsigned long param) case NR_STATE_2: if (nr->n2count == nr->n2) { nr_disconnect(sk, ETIMEDOUT); - goto out; + bh_unlock_sock(sk); + return; } else { nr->n2count++; nr_write_internal(sk, NR_DISCREQ); @@ -235,7 +234,8 @@ static void nr_t1timer_expiry(unsigned long param) case NR_STATE_3: if (nr->n2count == nr->n2) { nr_disconnect(sk, ETIMEDOUT); - goto out; + bh_unlock_sock(sk); + return; } else { nr->n2count++; nr_requeue_frames(sk); @@ -244,7 +244,5 @@ static void 
nr_t1timer_expiry(unsigned long param) } nr_start_t1timer(sk); -out: bh_unlock_sock(sk); - sock_put(sk); } diff --git a/net/nfc/af_nfc.c b/net/nfc/af_nfc.c index 1859b8e98ded..54e40fa47822 100644 --- a/net/nfc/af_nfc.c +++ b/net/nfc/af_nfc.c @@ -72,9 +72,6 @@ int nfc_proto_register(const struct nfc_protocol *nfc_proto) proto_tab[nfc_proto->id] = nfc_proto; write_unlock(&proto_tab_lock); - if (rc) - proto_unregister(nfc_proto->proto); - return rc; } EXPORT_SYMBOL(nfc_proto_register); diff --git a/net/nfc/core.c b/net/nfc/core.c index 8c7f221e1d12..1471e4b0aa2c 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -106,13 +106,13 @@ int nfc_dev_up(struct nfc_dev *dev) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { - rc = -ENODEV; + if (dev->rfkill && rfkill_blocked(dev->rfkill)) { + rc = -ERFKILL; goto error; } - if (dev->rfkill && rfkill_blocked(dev->rfkill)) { - rc = -ERFKILL; + if (!device_is_registered(&dev->dev)) { + rc = -ENODEV; goto error; } @@ -1120,7 +1120,11 @@ int nfc_register_device(struct nfc_dev *dev) if (rc) pr_err("Could not register llcp device\n"); - device_lock(&dev->dev); + rc = nfc_genl_device_added(dev); + if (rc) + pr_debug("The userspace won't be notified that the device %s was added\n", + dev_name(&dev->dev)); + dev->rfkill = rfkill_alloc(dev_name(&dev->dev), &dev->dev, RFKILL_TYPE_NFC, &nfc_rfkill_ops, dev); if (dev->rfkill) { @@ -1129,12 +1133,6 @@ int nfc_register_device(struct nfc_dev *dev) dev->rfkill = NULL; } } - device_unlock(&dev->dev); - - rc = nfc_genl_device_added(dev); - if (rc) - pr_debug("The userspace won't be notified that the device %s was added\n", - dev_name(&dev->dev)); return 0; } @@ -1151,17 +1149,10 @@ void nfc_unregister_device(struct nfc_dev *dev) pr_debug("dev_name=%s\n", dev_name(&dev->dev)); - rc = nfc_genl_device_removed(dev); - if (rc) - pr_debug("The userspace won't be notified that the device %s " - "was removed\n", dev_name(&dev->dev)); - - device_lock(&dev->dev); if (dev->rfkill) { 
rfkill_unregister(dev->rfkill); rfkill_destroy(dev->rfkill); } - device_unlock(&dev->dev); if (dev->ops->check_presence) { device_lock(&dev->dev); @@ -1171,6 +1162,11 @@ void nfc_unregister_device(struct nfc_dev *dev) cancel_work_sync(&dev->check_pres_work); } + rc = nfc_genl_device_removed(dev); + if (rc) + pr_debug("The userspace won't be notified that the device %s " + "was removed\n", dev_name(&dev->dev)); + nfc_llcp_unregister_device(dev); mutex_lock(&nfc_devlist_mutex); diff --git a/net/nfc/digital_core.c b/net/nfc/digital_core.c index 28c60e291c7e..23c2a118ac9f 100644 --- a/net/nfc/digital_core.c +++ b/net/nfc/digital_core.c @@ -280,7 +280,6 @@ int digital_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param) static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech) { struct digital_tg_mdaa_params *params; - int rc; params = kzalloc(sizeof(struct digital_tg_mdaa_params), GFP_KERNEL); if (!params) @@ -295,12 +294,8 @@ static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech) get_random_bytes(params->nfcid2 + 2, NFC_NFCID2_MAXSIZE - 2); params->sc = DIGITAL_SENSF_FELICA_SC; - rc = digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params, - 500, digital_tg_recv_atr_req, NULL); - if (rc) - kfree(params); - - return rc; + return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params, + 500, digital_tg_recv_atr_req, NULL); } static int digital_tg_listen_md(struct nfc_digital_dev *ddev, u8 rf_tech) diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c index 46375ff214c0..f72be7433df3 100644 --- a/net/nfc/digital_dep.c +++ b/net/nfc/digital_dep.c @@ -1187,8 +1187,6 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg, } rc = nfc_tm_data_received(ddev->nfc_dev, resp); - if (rc) - resp = NULL; exit: kfree_skb(ddev->chaining_skb); diff --git a/net/nfc/digital_technology.c b/net/nfc/digital_technology.c index 082dd95f6ef3..fb58ed2dd41d 100644 --- 
a/net/nfc/digital_technology.c +++ b/net/nfc/digital_technology.c @@ -473,12 +473,8 @@ static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev, *skb_put(skb, sizeof(u8)) = sel_cmd; *skb_put(skb, sizeof(u8)) = DIGITAL_SDD_REQ_SEL_PAR; - rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res, - target); - if (rc) - kfree_skb(skb); - - return rc; + return digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res, + target); } static void digital_in_recv_sens_res(struct nfc_digital_dev *ddev, void *arg, diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 1d61a08eafaf..44d6b8355eab 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -119,19 +119,13 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) llcp_sock->service_name_len, GFP_KERNEL); if (!llcp_sock->service_name) { - nfc_llcp_local_put(llcp_sock->local); - llcp_sock->local = NULL; - llcp_sock->dev = NULL; ret = -ENOMEM; goto put_dev; } llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock); if (llcp_sock->ssap == LLCP_SAP_MAX) { - nfc_llcp_local_put(llcp_sock->local); - llcp_sock->local = NULL; kfree(llcp_sock->service_name); llcp_sock->service_name = NULL; - llcp_sock->dev = NULL; ret = -EADDRINUSE; goto put_dev; } @@ -683,10 +677,6 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, ret = -EISCONN; goto error; } - if (sk->sk_state == LLCP_CONNECTING) { - ret = -EINPROGRESS; - goto error; - } dev = nfc_get_device(addr->dev_idx); if (dev == NULL) { @@ -718,8 +708,6 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, llcp_sock->local = nfc_llcp_local_get(local); llcp_sock->ssap = nfc_llcp_get_local_ssap(local); if (llcp_sock->ssap == LLCP_SAP_MAX) { - nfc_llcp_local_put(llcp_sock->local); - llcp_sock->local = NULL; ret = -ENOMEM; goto put_dev; } @@ -757,12 +745,8 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, sock_unlink: nfc_llcp_put_ssap(local, llcp_sock->ssap); - 
nfc_llcp_local_put(llcp_sock->local); - llcp_sock->local = NULL; nfc_llcp_sock_unlink(&local->connecting_sockets, sk); - kfree(llcp_sock->service_name); - llcp_sock->service_name = NULL; put_dev: nfc_put_device(dev); @@ -790,11 +774,6 @@ static int llcp_sock_sendmsg(struct socket *sock, struct msghdr *msg, lock_sock(sk); - if (!llcp_sock->local) { - release_sock(sk); - return -ENODEV; - } - if (sk->sk_type == SOCK_DGRAM) { DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, addr, msg->msg_name); diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index d5d215776980..6ac1a8d19b88 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -149,15 +149,12 @@ inline int nci_request(struct nci_dev *ndev, { int rc; + if (!test_bit(NCI_UP, &ndev->flags)) + return -ENETDOWN; + /* Serialize all requests */ mutex_lock(&ndev->req_lock); - /* check the state after obtaing the lock against any races - * from nci_close_device when the device gets removed. - */ - if (test_bit(NCI_UP, &ndev->flags)) - rc = __nci_request(ndev, req, opt, timeout); - else - rc = -ENETDOWN; + rc = __nci_request(ndev, req, opt, timeout); mutex_unlock(&ndev->req_lock); return rc; @@ -401,11 +398,6 @@ static int nci_open_device(struct nci_dev *ndev) mutex_lock(&ndev->req_lock); - if (test_bit(NCI_UNREG, &ndev->flags)) { - rc = -ENODEV; - goto done; - } - if (test_bit(NCI_UP, &ndev->flags)) { rc = -EALREADY; goto done; @@ -469,10 +461,6 @@ done: static int nci_close_device(struct nci_dev *ndev) { nci_req_cancel(ndev, ENODEV); - - /* This mutex needs to be held as a barrier for - * caller nci_unregister_device - */ mutex_lock(&ndev->req_lock); if (!test_and_clear_bit(NCI_UP, &ndev->flags)) { @@ -510,8 +498,8 @@ static int nci_close_device(struct nci_dev *ndev) /* Flush cmd wq */ flush_workqueue(ndev->cmd_wq); - /* Clear flags except NCI_UNREG */ - ndev->flags &= BIT(NCI_UNREG); + /* Clear flags */ + ndev->flags = 0; mutex_unlock(&ndev->req_lock); @@ -1111,7 +1099,6 @@ EXPORT_SYMBOL(nci_allocate_device); void 
nci_free_device(struct nci_dev *ndev) { nfc_free_device(ndev->nfc_dev); - nci_hci_deallocate(ndev); kfree(ndev); } EXPORT_SYMBOL(nci_free_device); @@ -1191,12 +1178,6 @@ void nci_unregister_device(struct nci_dev *ndev) { struct nci_conn_info *conn_info, *n; - /* This set_bit is not protected with specialized barrier, - * However, it is fine because the mutex_lock(&ndev->req_lock); - * in nci_close_device() will help to emit one. - */ - set_bit(NCI_UNREG, &ndev->flags); - nci_close_device(ndev); destroy_workqueue(ndev->cmd_wq); diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c index 309e8cebed55..2aedac15cb59 100644 --- a/net/nfc/nci/hci.c +++ b/net/nfc/nci/hci.c @@ -798,8 +798,3 @@ struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev) return hdev; } - -void nci_hci_deallocate(struct nci_dev *ndev) -{ - kfree(ndev->hci_dev); -} diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c index 74e4d5e8c275..9b6eb913d801 100644 --- a/net/nfc/nci/rsp.c +++ b/net/nfc/nci/rsp.c @@ -274,8 +274,6 @@ static void nci_core_conn_close_rsp_packet(struct nci_dev *ndev, conn_info = nci_get_conn_info_by_conn_id(ndev, ndev->cur_id); if (conn_info) { list_del(&conn_info->list); - if (conn_info == ndev->rf_conn_info) - ndev->rf_conn_info = NULL; devm_kfree(&ndev->nfc_dev->dev, conn_info); } } diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 4286b900a306..639e5cad0442 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -632,10 +632,8 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb) { struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; - if (iter) { - nfc_device_iter_exit(iter); - kfree(iter); - } + nfc_device_iter_exit(iter); + kfree(iter); return 0; } @@ -852,7 +850,6 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info) if (!dev->polling) { device_unlock(&dev->dev); - nfc_put_device(dev); return -EINVAL; } @@ -1368,10 +1365,8 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb) { struct 
class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; - if (iter) { - nfc_device_iter_exit(iter); - kfree(iter); - } + nfc_device_iter_exit(iter); + kfree(iter); return 0; } diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index 2fba626a0125..574af981806f 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -117,7 +117,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, if (addr->target_idx > dev->target_next_idx - 1 || addr->target_idx < dev->target_next_idx - dev->n_targets) { rc = -EINVAL; - goto put_dev; + goto error; } rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); @@ -345,7 +345,7 @@ static int rawsock_create(struct net *net, struct socket *sock, return -ESOCKTNOSUPPORT; if (sock->type == SOCK_RAW) { - if (!ns_capable(net->user_ns, CAP_NET_RAW)) + if (!capable(CAP_NET_RAW)) return -EPERM; sock->ops = &rawsock_raw_ops; } else { diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 663ef3b67ad7..828fdced4ecd 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -375,43 +375,12 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, memcpy(addr, new_addr, sizeof(__be32[4])); } -static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask) +static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask) { - u8 old_ipv6_tclass = ipv6_get_dsfield(nh); - - ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask); - - if (skb->ip_summed == CHECKSUM_COMPLETE) - csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12), - (__force __wsum)(ipv6_tclass << 12)); - - ipv6_change_dsfield(nh, ~mask, ipv6_tclass); -} - -static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask) -{ - u32 ofl; - - ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2]; - fl = OVS_MASKED(ofl, fl, mask); - /* Bits 21-24 are always unmasked, so this retains their values. 
*/ - nh->flow_lbl[0] = (u8)(fl >> 16); - nh->flow_lbl[1] = (u8)(fl >> 8); - nh->flow_lbl[2] = (u8)fl; - - if (skb->ip_summed == CHECKSUM_COMPLETE) - csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl)); -} - -static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask) -{ - new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask); - - if (skb->ip_summed == CHECKSUM_COMPLETE) - csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8), - (__force __wsum)(new_ttl << 8)); - nh->hop_limit = new_ttl; + OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16)); + OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8)); + OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask); } static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl, @@ -529,17 +498,18 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key, } } if (mask->ipv6_tclass) { - set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass); + ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass); flow_key->ip.tos = ipv6_get_dsfield(nh); } if (mask->ipv6_label) { - set_ipv6_fl(skb, nh, ntohl(key->ipv6_label), + set_ipv6_fl(nh, ntohl(key->ipv6_label), ntohl(mask->ipv6_label)); flow_key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); } if (mask->ipv6_hlimit) { - set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit); + OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit, + mask->ipv6_hlimit); flow_key->ip.ttl = nh->hop_limit; } return 0; @@ -724,16 +694,16 @@ static void ovs_fragment(struct net *net, struct vport *vport, } if (ethertype == htons(ETH_P_IP)) { - struct rtable ovs_rt = { 0 }; + struct dst_entry ovs_dst; unsigned long orig_dst; prepare_frag(vport, skb); - dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1, + dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1, DST_OBSOLETE_NONE, DST_NOCOUNT); - ovs_rt.dst.dev = vport->dev; + ovs_dst.dev = vport->dev; orig_dst = skb->_skb_refdst; - skb_dst_set_noref(skb, 
&ovs_rt.dst); + skb_dst_set_noref(skb, &ovs_dst); IPCB(skb)->frag_max_size = mru; ip_do_fragment(net, skb->sk, skb, ovs_vport_output); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ef9edd70160c..eac6f7eea7b5 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1709,7 +1709,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) match->prot_hook.dev = po->prot_hook.dev; match->prot_hook.func = packet_rcv_fanout; match->prot_hook.af_packet_priv = match; - match->prot_hook.af_packet_net = read_pnet(&match->net); match->prot_hook.id_match = match_fanout_group; list_add(&match->list, &fanout_list); } @@ -1723,10 +1722,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) err = -ENOSPC; if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) { __dev_remove_pack(&po->prot_hook); - - /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */ - WRITE_ONCE(po->fanout, match); - + po->fanout = match; po->rollover = rollover; rollover = NULL; atomic_inc(&match->sk_ref); @@ -3171,7 +3167,6 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, po->prot_hook.func = packet_rcv_spkt; po->prot_hook.af_packet_priv = sk; - po->prot_hook.af_packet_net = sock_net(sk); if (proto) { po->prot_hook.type = proto; @@ -3798,8 +3793,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv } case PACKET_FANOUT_DATA: { - /* Paired with the WRITE_ONCE() in fanout_add() */ - if (!READ_ONCE(po->fanout)) + if (!po->fanout) return -EINVAL; return fanout_set_data(po, optval, optlen); diff --git a/net/phonet/pep.c b/net/phonet/pep.c index a734d47c5eb1..f6aa532bcbf6 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -878,7 +878,6 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp) err = pep_accept_conn(newsk, skb); if (err) { - __sock_put(sk); sock_put(newsk); newsk = NULL; goto drop; @@ -957,8 +956,6 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long 
arg) ret = -EBUSY; else if (sk->sk_state == TCP_ESTABLISHED) ret = -EISCONN; - else if (!pn->pn_sk.sobject) - ret = -EADDRNOTAVAIL; else ret = pep_sock_enable(sk, NULL, 0); release_sock(sk); diff --git a/net/rds/recv.c b/net/rds/recv.c index 1ff4bc3237f0..9bf812509e0e 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -482,7 +482,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, if (rds_cmsg_recv(inc, msg)) { ret = -EFAULT; - break; + goto out; } rds_stats_inc(s_recv_delivered); diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c index 0f371e50d9c4..344456206b70 100644 --- a/net/rose/rose_loopback.c +++ b/net/rose/rose_loopback.c @@ -99,19 +99,10 @@ static void rose_loopback_timer(unsigned long param) } if (frametype == ROSE_CALL_REQUEST) { - if (!rose_loopback_neigh->dev) { - kfree_skb(skb); - continue; - } - - dev = rose_dev_get(dest); - if (!dev) { - kfree_skb(skb); - continue; - } - - if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) { - dev_put(dev); + if ((dev = rose_dev_get(dest)) != NULL) { + if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) + kfree_skb(skb); + } else { kfree_skb(skb); } } else { diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index f4ad63d6e540..ea615e53eab2 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c @@ -1116,7 +1116,7 @@ static long rxrpc_read(const struct key *key, default: /* we have a ticket we can't encode */ pr_err("Unsupported key token type (%u)\n", token->security_index); - return -ENOPKG; + continue; } _debug("token[%u]: toksize=%u", ntoks, toksize); @@ -1236,9 +1236,7 @@ static long rxrpc_read(const struct key *key, break; default: - pr_err("Unsupported key token type (%u)\n", - token->security_index); - return -ENOPKG; + break; } ASSERTCMP((unsigned long)xdr - (unsigned long)oldxdr, ==, diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 3d891b11c077..755e9ff40fca 100644 --- a/net/sched/cls_tcindex.c +++ 
b/net/sched/cls_tcindex.c @@ -273,13 +273,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, if (tb[TCA_TCINDEX_MASK]) cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); - if (tb[TCA_TCINDEX_SHIFT]) { + if (tb[TCA_TCINDEX_SHIFT]) cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); - if (cp->shift > 16) { - err = -EINVAL; - goto errout; - } - } + if (!cp->hash) { /* Hash not specified, use perfect hash if the upper limit * of the hashing index is below the threshold. diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 4ee2e9a3d12e..a4b492bb7fe5 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -391,8 +391,7 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta { struct qdisc_rate_table *rtab; - if (tab == NULL || r->rate == 0 || - r->cell_log == 0 || r->cell_log >= 32 || + if (tab == NULL || r->rate == 0 || r->cell_log == 0 || nla_len(tab) != TC_RTAB_SIZE) return NULL; diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index fee59e25929c..2812de74c9a7 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -423,7 +423,6 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) struct sk_buff **old = NULL; unsigned int mask; u32 max_P; - u8 *stab; if (opt == NULL) return -EINVAL; @@ -439,8 +438,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) max_P = tb[TCA_CHOKE_MAX_P] ? 
nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0; ctl = nla_data(tb[TCA_CHOKE_PARMS]); - stab = nla_data(tb[TCA_CHOKE_STAB]); - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) + + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) return -EINVAL; if (ctl->limit > CHOKE_MAX_QUEUE) @@ -493,7 +492,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt) red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog, ctl->Scell_log, - stab, + nla_data(tb[TCA_CHOKE_STAB]), max_P); red_set_vars(&q->vars); diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index f5d2c32dae24..5f8f6d94336c 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -404,8 +404,7 @@ static void dsmark_reset(struct Qdisc *sch) struct dsmark_qdisc_data *p = qdisc_priv(sch); pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p); - if (p->q) - qdisc_reset(p->q); + qdisc_reset(p->q); sch->qstats.backlog = 0; sch->q.qlen = 0; } diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index 6c99b833f665..2e4bd2c0a50c 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -151,9 +151,6 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit) if (strncmp(q->ops->id + 1, "fifo", 4) != 0) return 0; - if (!q->ops->change) - return 0; - nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); if (nla) { nla->nla_type = RTM_NEWQDISC; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 357e52455be6..aa9ed0440de2 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -971,7 +971,6 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, { memset(r, 0, sizeof(*r)); r->overhead = conf->overhead; - r->mpu = conf->mpu; r->rate_bytes_ps = max_t(u64, conf->rate, rate64); r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); r->mult = 1; diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 2f73232031c6..a5745cb2d014 100644 --- a/net/sched/sch_gred.c +++ 
b/net/sched/sch_gred.c @@ -389,7 +389,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp, struct gred_sched *table = qdisc_priv(sch); struct gred_sched_data *q = table->tab[dp]; - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) return -EINVAL; if (!q) { diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index d466fab84261..8dabd8257b49 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1479,8 +1479,10 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt) if (err < 0) return err; - max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1, - QFQ_MAX_AGG_CLASSES); + if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES) + max_classes = QFQ_MAX_AGG_CLASSES; + else + max_classes = qdisc_dev(sch)->tx_queue_len + 1; /* max_cl_shift = floor(log_2(max_classes)) */ max_cl_shift = __fls(max_classes); q->max_agg_classes = 1<qth_min, ctl->qth_max, ctl->Wlog, - ctl->Scell_log, stab)) + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) return -EINVAL; if (ctl->limit > 0) { @@ -228,7 +225,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt) red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog, ctl->Scell_log, - stab, + nla_data(tb[TCA_RED_STAB]), max_P); red_set_vars(&q->vars); diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 0b27487fd07d..7929c1a11e12 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -645,7 +645,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) } if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, - ctl_v1->Wlog, ctl_v1->Scell_log, NULL)) + ctl_v1->Wlog)) return -EINVAL; if (ctl_v1 && ctl_v1->qth_min) { p = kmalloc(sizeof(*p), GFP_KERNEL); diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index a7ecf626e998..e02687185a59 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -138,9 +138,6 @@ 
teql_destroy(struct Qdisc *sch) struct teql_sched_data *dat = qdisc_priv(sch); struct teql_master *master = dat->m; - if (!master) - return; - prev = master->slaves; if (prev) { do { diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index 40fd399a1035..664215448d09 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -284,15 +284,19 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list, rawaddr = (union sctp_addr_param *)raw_addr_list; af = sctp_get_af_specific(param_type2af(param->type)); - if (unlikely(!af) || - !af->from_addr_param(&addr, rawaddr, htons(port), 0)) { + if (unlikely(!af)) { retval = -EINVAL; - goto out_err; + sctp_bind_addr_clean(bp); + break; } + af->from_addr_param(&addr, rawaddr, htons(port), 0); retval = sctp_add_bind_addr(bp, &addr, SCTP_ADDR_SRC, gfp); - if (retval) - goto out_err; + if (retval) { + /* Can't finish building the list, clean up. */ + sctp_bind_addr_clean(bp); + break; + } len = ntohs(param->length); addrs_len -= len; @@ -300,12 +304,6 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list, } return retval; - -out_err: - if (retval) - sctp_bind_addr_clean(bp); - - return retval; } /******************************************************************** diff --git a/net/sctp/input.c b/net/sctp/input.c index 3f0b8aafc21a..9fa89a35afcd 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -972,8 +972,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net, if (!af) continue; - if (!af->from_addr_param(paddr, params.addr, sh->source, 0)) - continue; + af->from_addr_param(paddr, params.addr, sh->source, 0); asoc = __sctp_lookup_association(net, laddr, paddr, &transport); if (asoc) @@ -1009,9 +1008,6 @@ static struct sctp_association *__sctp_rcv_asconf_lookup( union sctp_addr_param *param; union sctp_addr paddr; - if (ntohs(ch->length) < sizeof(*asconf) + sizeof(struct sctp_paramhdr)) - return NULL; - /* Skip over the ADDIP header and find the Address 
parameter */ param = (union sctp_addr_param *)(asconf + 1); @@ -1019,8 +1015,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup( if (unlikely(!af)) return NULL; - if (af->from_addr_param(&paddr, param, peer_port, 0)) - return NULL; + af->from_addr_param(&paddr, param, peer_port, 0); return __sctp_lookup_association(net, laddr, &paddr, transportp); } @@ -1091,7 +1086,7 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net, ch = (sctp_chunkhdr_t *) ch_end; chunk_num++; - } while (ch_end + sizeof(*ch) < skb_tail_pointer(skb)); + } while (ch_end < skb_tail_pointer(skb)); return asoc; } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 62c729402a04..1a6849add0e3 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -488,20 +488,15 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk) } /* Initialize a sctp_addr from an address parameter. */ -static bool sctp_v6_from_addr_param(union sctp_addr *addr, +static void sctp_v6_from_addr_param(union sctp_addr *addr, union sctp_addr_param *param, __be16 port, int iif) { - if (ntohs(param->v6.param_hdr.length) < sizeof(struct sctp_ipv6addr_param)) - return false; - addr->v6.sin6_family = AF_INET6; addr->v6.sin6_port = port; addr->v6.sin6_flowinfo = 0; /* BUG */ addr->v6.sin6_addr = param->v6.addr; addr->v6.sin6_scope_id = iif; - - return true; } /* Initialize an address parameter from a sctp_addr and return the length diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 510b805aab2d..b0e401dfe160 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -272,19 +272,14 @@ static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk) } /* Initialize a sctp_addr from an address parameter. 
*/ -static bool sctp_v4_from_addr_param(union sctp_addr *addr, +static void sctp_v4_from_addr_param(union sctp_addr *addr, union sctp_addr_param *param, __be16 port, int iif) { - if (ntohs(param->v4.param_hdr.length) < sizeof(struct sctp_ipv4addr_param)) - return false; - addr->v4.sin_family = AF_INET; addr->v4.sin_port = port; addr->v4.sin_addr.s_addr = param->v4.addr.s_addr; memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); - - return true; } /* Initialize an address parameter from a sctp_addr and return the length @@ -416,8 +411,7 @@ static sctp_scope_t sctp_v4_scope(union sctp_addr *addr) retval = SCTP_SCOPE_LINK; } else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) || ipv4_is_private_172(addr->v4.sin_addr.s_addr) || - ipv4_is_private_192(addr->v4.sin_addr.s_addr) || - ipv4_is_test_198(addr->v4.sin_addr.s_addr)) { + ipv4_is_private_192(addr->v4.sin_addr.s_addr)) { retval = SCTP_SCOPE_PRIVATE; } else { retval = SCTP_SCOPE_GLOBAL; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index d31e0d6c641b..e3e44237de1c 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2146,16 +2146,9 @@ static sctp_ierror_t sctp_verify_param(struct net *net, break; case SCTP_PARAM_SET_PRIMARY: - if (!net->sctp.addip_enable) - goto fallthrough; - - if (ntohs(param.p->length) < sizeof(struct sctp_addip_param) + - sizeof(struct sctp_paramhdr)) { - sctp_process_inv_paramlength(asoc, param.p, - chunk, err_chunk); - retval = SCTP_IERROR_ABORT; - } - break; + if (net->sctp.addip_enable) + break; + goto fallthrough; case SCTP_PARAM_HOST_NAME_ADDRESS: /* Tell the peer, we won't support this param. */ @@ -2333,13 +2326,11 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, /* Process the initialization parameters. 
*/ sctp_walk_params(param, peer_init, init_hdr.params) { - if (!src_match && - (param.p->type == SCTP_PARAM_IPV4_ADDRESS || - param.p->type == SCTP_PARAM_IPV6_ADDRESS)) { + if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS || + param.p->type == SCTP_PARAM_IPV6_ADDRESS)) { af = sctp_get_af_specific(param_type2af(param.p->type)); - if (!af->from_addr_param(&addr, param.addr, - chunk->sctp_hdr->source, 0)) - continue; + af->from_addr_param(&addr, param.addr, + chunk->sctp_hdr->source, 0); if (sctp_cmp_addr_exact(sctp_source(chunk), &addr)) src_match = 1; } @@ -2533,8 +2524,7 @@ static int sctp_process_param(struct sctp_association *asoc, break; do_addr_param: af = sctp_get_af_specific(param_type2af(param.p->type)); - if (!af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0)) - break; + af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0); scope = sctp_scope(peer_addr); if (sctp_in_scope(net, &addr, scope)) if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) @@ -2627,13 +2617,15 @@ do_addr_param: addr_param = param.v + sizeof(sctp_addip_param_t); af = sctp_get_af_specific(param_type2af(addr_param->p.type)); - if (!af) + if (af == NULL) break; - if (!af->from_addr_param(&addr, addr_param, - htons(asoc->peer.port), 0)) - break; + af->from_addr_param(&addr, addr_param, + htons(asoc->peer.port), 0); + /* if the address is invalid, we can't process it. + * XXX: see spec for what to do. + */ if (!af->addr_valid(&addr, NULL, NULL)) break; @@ -3043,8 +3035,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, if (unlikely(!af)) return SCTP_ERROR_DNS_FAILED; - if (!af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0)) - return SCTP_ERROR_DNS_FAILED; + af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); /* ADDIP 4.2.1 This parameter MUST NOT contain a broadcast * or multicast address. 
@@ -3128,7 +3119,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, * primary. */ if (af->is_any(&addr)) - memcpy(&addr, sctp_source(asconf), sizeof(addr)); + memcpy(&addr.v4, sctp_source(asconf), sizeof(addr)); peer = sctp_assoc_lookup_paddr(asoc, &addr); if (!peer) @@ -3310,8 +3301,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc, /* We have checked the packet before, so we do not check again. */ af = sctp_get_af_specific(param_type2af(addr_param->p.type)); - if (!af->from_addr_param(&addr, addr_param, htons(bp->port), 0)) - return; + af->from_addr_param(&addr, addr_param, htons(bp->port), 0); switch (asconf_param->param_hdr.type) { case SCTP_PARAM_ADD_IP: diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 53bb631ec490..a9a72f7e0cd7 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1851,8 +1851,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net, sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_ESTABLISHED)); - if (asoc->state < SCTP_STATE_ESTABLISHED) - SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); repl = sctp_make_cookie_ack(new_asoc, chunk); @@ -4337,9 +4336,6 @@ sctp_disposition_t sctp_sf_violation(struct net *net, { struct sctp_chunk *chunk = arg; - if (!sctp_vtag_verify(chunk, asoc)) - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); - /* Make sure that the chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, @@ -6030,7 +6026,6 @@ static struct sctp_packet *sctp_ootb_pkt_new(struct net *net, * yet. 
*/ switch (chunk->chunk_hdr->type) { - case SCTP_CID_INIT: case SCTP_CID_INIT_ACK: { sctp_initack_chunk_t *initack; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index fcac88f1774b..62ba9a49c126 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -352,18 +352,6 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, return af; } -static void sctp_auto_asconf_init(struct sctp_sock *sp) -{ - struct net *net = sock_net(&sp->inet.sk); - - if (net->sctp.default_auto_asconf) { - spin_lock(&net->sctp.addr_wq_lock); - list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist); - spin_unlock(&net->sctp.addr_wq_lock); - sp->do_auto_asconf = 1; - } -} - /* Bind a local address either to an endpoint or to an association. */ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) { @@ -426,10 +414,8 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) } /* Refresh ephemeral port. */ - if (!bp->port) { + if (!bp->port) bp->port = inet_sk(sk)->inet_num; - sctp_auto_asconf_init(sp); - } /* Add the address to the bind address list. * Use GFP_ATOMIC since BHs will be disabled. 
@@ -4175,6 +4161,19 @@ static int sctp_init_sock(struct sock *sk) sk_sockets_allocated_inc(sk); sock_prot_inuse_add(net, sk->sk_prot, 1); + /* Nothing can fail after this block, otherwise + * sctp_destroy_sock() will be called without addr_wq_lock held + */ + if (net->sctp.default_auto_asconf) { + spin_lock(&sock_net(sk)->sctp.addr_wq_lock); + list_add_tail(&sp->auto_asconf_list, + &net->sctp.auto_asconf_splist); + sp->do_auto_asconf = 1; + spin_unlock(&sock_net(sk)->sctp.addr_wq_lock); + } else { + sp->do_auto_asconf = 0; + } + local_bh_enable(); return 0; @@ -7338,8 +7337,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, sctp_bind_addr_dup(&newsp->ep->base.bind_addr, &oldsp->ep->base.bind_addr, GFP_KERNEL); - sctp_auto_asconf_init(newsp); - /* Move any messages in the old socket's receive queue that are for the * peeled off association to the new socket's receive queue. */ diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c index 7404f02702a1..8391c2785550 100644 --- a/net/sunrpc/addr.c +++ b/net/sunrpc/addr.c @@ -184,7 +184,7 @@ static int rpc_parse_scope_id(struct net *net, const char *buf, scope_id = dev->ifindex; dev_put(dev); } else { - if (kstrtou32(p, 10, &scope_id) != 0) { + if (kstrtou32(p, 10, &scope_id) == 0) { kfree(p); return 0; } diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 7bde2976307e..62fca77bf3c7 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -53,7 +53,6 @@ #include #include -#include "auth_gss_internal.h" #include "../netns.h" static const struct rpc_authops authgss_ops; @@ -148,6 +147,35 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); } +static const void * +simple_get_bytes(const void *p, const void *end, void *res, size_t len) +{ + const void *q = (const void *)((const char *)p + len); + if (unlikely(q > end || q < p)) + return ERR_PTR(-EFAULT); + memcpy(res, p, len); + return q; 
+} + +static inline const void * +simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) +{ + const void *q; + unsigned int len; + + p = simple_get_bytes(p, end, &len, sizeof(len)); + if (IS_ERR(p)) + return p; + q = (const void *)((const char *)p + len); + if (unlikely(q > end || q < p)) + return ERR_PTR(-EFAULT); + dest->data = kmemdup(p, len, GFP_NOFS); + if (unlikely(dest->data == NULL)) + return ERR_PTR(-ENOMEM); + dest->len = len; + return q; +} + static struct gss_cl_ctx * gss_cred_get_ctx(struct rpc_cred *cred) { diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h deleted file mode 100644 index f6d9631bd9d0..000000000000 --- a/net/sunrpc/auth_gss/auth_gss_internal.h +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -/* - * linux/net/sunrpc/auth_gss/auth_gss_internal.h - * - * Internal definitions for RPCSEC_GSS client authentication - * - * Copyright (c) 2000 The Regents of the University of Michigan. - * All rights reserved. 
- * - */ -#include -#include -#include - -static inline const void * -simple_get_bytes(const void *p, const void *end, void *res, size_t len) -{ - const void *q = (const void *)((const char *)p + len); - if (unlikely(q > end || q < p)) - return ERR_PTR(-EFAULT); - memcpy(res, p, len); - return q; -} - -static inline const void * -simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) -{ - const void *q; - unsigned int len; - - p = simple_get_bytes(p, end, &len, sizeof(len)); - if (IS_ERR(p)) - return p; - q = (const void *)((const char *)p + len); - if (unlikely(q > end || q < p)) - return ERR_PTR(-EFAULT); - if (len) { - dest->data = kmemdup(p, len, GFP_NOFS); - if (unlikely(dest->data == NULL)) - return ERR_PTR(-ENOMEM); - } else - dest->data = NULL; - dest->len = len; - return q; -} diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 89e616da161f..28db442a0034 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -45,8 +45,6 @@ #include #include -#include "auth_gss_internal.h" - #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define RPCDBG_FACILITY RPCDBG_AUTH #endif @@ -188,6 +186,35 @@ get_gss_krb5_enctype(int etype) return NULL; } +static const void * +simple_get_bytes(const void *p, const void *end, void *res, int len) +{ + const void *q = (const void *)((const char *)p + len); + if (unlikely(q > end || q < p)) + return ERR_PTR(-EFAULT); + memcpy(res, p, len); + return q; +} + +static const void * +simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) +{ + const void *q; + unsigned int len; + + p = simple_get_bytes(p, end, &len, sizeof(len)); + if (IS_ERR(p)) + return p; + q = (const void *)((const char *)p + len); + if (unlikely(q > end || q < p)) + return ERR_PTR(-EFAULT); + res->data = kmemdup(p, len, GFP_NOFS); + if (unlikely(res->data == NULL)) + return ERR_PTR(-ENOMEM); + res->len = len; + return q; +} + static inline const void * 
get_key(const void *p, const void *end, struct krb5_ctx *ctx, struct crypto_blkcipher **res) diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index daf0c1ea3917..91263d6a103b 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -1697,14 +1697,11 @@ static int svcauth_gss_release(struct svc_rqst *rqstp) { struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data; - struct rpc_gss_wire_cred *gc; + struct rpc_gss_wire_cred *gc = &gsd->clcred; struct xdr_buf *resbuf = &rqstp->rq_res; int stat = -EINVAL; struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id); - if (!gsd) - goto out; - gc = &gsd->clcred; if (gc->gc_proc != RPC_GSS_PROC_DATA) goto out; /* Release can be called twice, but we only wrap once. */ @@ -1745,10 +1742,10 @@ out_err: if (rqstp->rq_cred.cr_group_info) put_group_info(rqstp->rq_cred.cr_group_info); rqstp->rq_cred.cr_group_info = NULL; - if (gsd && gsd->rsci) { + if (gsd->rsci) cache_put(&gsd->rsci->h, sn->rsc_cache); - gsd->rsci = NULL; - } + gsd->rsci = NULL; + return stat; } @@ -1845,7 +1842,7 @@ gss_svc_init_net(struct net *net) goto out2; return 0; out2: - rsi_cache_destroy_net(net); + destroy_use_gss_proxy_proc_entry(net); out1: rsc_cache_destroy_net(net); return rv; diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 737556204566..3eed71a2ff2b 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -472,21 +472,11 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q struct list_head *q; struct rpc_task *task; - /* - * Service the privileged queue. - */ - q = &queue->tasks[RPC_NR_PRIORITY - 1]; - if (queue->maxpriority > RPC_PRIORITY_PRIVILEGED && !list_empty(q)) { - task = list_first_entry(q, struct rpc_task, u.tk_wait.list); - goto out; - } - /* * Service a batch of tasks from a single owner. 
*/ q = &queue->tasks[queue->priority]; - if (!list_empty(q) && queue->nr) { - queue->nr--; + if (!list_empty(q) && --queue->nr) { task = list_first_entry(q, struct rpc_task, u.tk_wait.list); goto out; } diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 7629982040c4..a7cd03165680 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -1011,7 +1011,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st struct svc_xprt *xprt; int ret = 0; - spin_lock_bh(&serv->sv_lock); + spin_lock(&serv->sv_lock); list_for_each_entry(xprt, xprt_list, xpt_list) { if (xprt->xpt_net != net) continue; @@ -1019,7 +1019,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st set_bit(XPT_CLOSE, &xprt->xpt_flags); svc_xprt_enqueue(xprt); } - spin_unlock_bh(&serv->sv_lock); + spin_unlock(&serv->sv_lock); return ret; } diff --git a/net/tipc/link.c b/net/tipc/link.c index 6e4ade6cc653..0080699b7cd1 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1223,14 +1223,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, u16 peers_tol = msg_link_tolerance(hdr); u16 peers_prio = msg_linkprio(hdr); u16 rcv_nxt = l->rcv_nxt; - u32 dlen = msg_data_sz(hdr); int mtyp = msg_type(hdr); char *if_name; int rc = 0; - if (dlen > U16_MAX) - goto exit; - if (tipc_link_is_blocked(l) || !xmitq) goto exit; diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 6bac0e6e4643..f3c7e5d1fc57 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -139,13 +139,18 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) if (unlikely(head)) goto err; *buf = NULL; - if (skb_has_frag_list(frag) && __skb_linearize(frag)) - goto err; frag = skb_unshare(frag, GFP_ATOMIC); if (unlikely(!frag)) goto err; head = *headbuf = frag; TIPC_SKB_CB(head)->tail = NULL; + if (skb_is_nonlinear(head)) { + skb_walk_frags(head, tail) { + TIPC_SKB_CB(head)->tail = tail; + } + } else { + skb_frag_list_init(head); + } return 0; } 
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 81ca1d5980c1..b57675f81ceb 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -329,7 +329,7 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i, return true; } } else { - pr_warn_ratelimited("Unknown name table message received\n"); + pr_warn("Unrecognized name table message received\n"); } return false; } diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index fb1b5dcf0142..0975a28f8686 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -632,7 +632,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg, nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL); - link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST])); + link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME], TIPC_MAX_LINK_NAME); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 9d380d55ea1c..65171f8e8c45 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -763,9 +763,6 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, spin_lock_bh(&inputq->lock); if (skb_peek(arrvq) == skb) { skb_queue_splice_tail_init(&tmpq, inputq); - /* Decrease the skb's refcnt as increasing in the - * function tipc_skb_peek - */ kfree_skb(__skb_dequeue(arrvq)); } spin_unlock_bh(&inputq->lock); @@ -1757,7 +1754,7 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, u32 dport, struct sk_buff_head *xmitq) { - unsigned long time_limit = jiffies + usecs_to_jiffies(20000); + unsigned long time_limit = jiffies + 2; struct sk_buff *skb; unsigned int lim; atomic_t *dcnt; @@ -1987,7 +1984,7 @@ static int tipc_listen(struct socket *sock, int len) static int tipc_wait_for_accept(struct socket *sock, long timeo) { struct sock *sk 
= sock->sk; - DEFINE_WAIT_FUNC(wait, woken_wake_function); + DEFINE_WAIT(wait); int err; /* True wake-one mechanism for incoming connections: only @@ -1996,12 +1993,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) * anymore, the common case will execute the loop only once. */ for (;;) { + prepare_to_wait_exclusive(sk_sleep(sk), &wait, + TASK_INTERRUPTIBLE); if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { - add_wait_queue(sk_sleep(sk), &wait); release_sock(sk); - timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); + timeo = schedule_timeout(timeo); lock_sock(sk); - remove_wait_queue(sk_sleep(sk), &wait); } err = 0; if (!skb_queue_empty(&sk->sk_receive_queue)) @@ -2016,6 +2013,7 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) if (signal_pending(current)) break; } + finish_wait(sk_sleep(sk), &wait); return err; } diff --git a/net/unix/Kconfig b/net/unix/Kconfig index 3b9e450656a4..8b31ab85d050 100644 --- a/net/unix/Kconfig +++ b/net/unix/Kconfig @@ -19,11 +19,6 @@ config UNIX Say Y unless you know what you are doing. 
-config UNIX_SCM - bool - depends on UNIX - default y - config UNIX_DIAG tristate "UNIX: socket monitoring interface" depends on UNIX diff --git a/net/unix/Makefile b/net/unix/Makefile index dc686c6757fb..b663c607b1c6 100644 --- a/net/unix/Makefile +++ b/net/unix/Makefile @@ -9,5 +9,3 @@ unix-$(CONFIG_SYSCTL) += sysctl_net_unix.o obj-$(CONFIG_UNIX_DIAG) += unix_diag.o unix_diag-y := diag.o - -obj-$(CONFIG_UNIX_SCM) += scm.o diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 2c09eadfa90c..014e35c2723a 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -118,8 +118,6 @@ #include #include -#include "scm.h" - struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE]; EXPORT_SYMBOL_GPL(unix_socket_table); DEFINE_SPINLOCK(unix_table_lock); @@ -536,14 +534,12 @@ static void unix_release_sock(struct sock *sk, int embrion) u->path.mnt = NULL; state = sk->sk_state; sk->sk_state = TCP_CLOSE; - - skpair = unix_peer(sk); - unix_peer(sk) = NULL; - unix_state_unlock(sk); wake_up_interruptible_all(&u->peer_wait); + skpair = unix_peer(sk); + if (skpair != NULL) { if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) { unix_state_lock(skpair); @@ -558,6 +554,7 @@ static void unix_release_sock(struct sock *sk, int embrion) unix_dgram_peer_wake_disconnect(sk, skpair); sock_put(skpair); /* It may now die */ + unix_peer(sk) = NULL; } /* Try to flush out this socket. 
Throw out buffers at least */ @@ -594,42 +591,20 @@ static void unix_release_sock(struct sock *sk, int embrion) static void init_peercred(struct sock *sk) { - const struct cred *old_cred; - struct pid *old_pid; - - spin_lock(&sk->sk_peer_lock); - old_pid = sk->sk_peer_pid; - old_cred = sk->sk_peer_cred; + put_pid(sk->sk_peer_pid); + if (sk->sk_peer_cred) + put_cred(sk->sk_peer_cred); sk->sk_peer_pid = get_pid(task_tgid(current)); sk->sk_peer_cred = get_current_cred(); - spin_unlock(&sk->sk_peer_lock); - - put_pid(old_pid); - put_cred(old_cred); } static void copy_peercred(struct sock *sk, struct sock *peersk) { - const struct cred *old_cred; - struct pid *old_pid; - - if (sk < peersk) { - spin_lock(&sk->sk_peer_lock); - spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING); - } else { - spin_lock(&peersk->sk_peer_lock); - spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING); - } - old_pid = sk->sk_peer_pid; - old_cred = sk->sk_peer_cred; + put_pid(sk->sk_peer_pid); + if (sk->sk_peer_cred) + put_cred(sk->sk_peer_cred); sk->sk_peer_pid = get_pid(peersk->sk_peer_pid); sk->sk_peer_cred = get_cred(peersk->sk_peer_cred); - - spin_unlock(&sk->sk_peer_lock); - spin_unlock(&peersk->sk_peer_lock); - - put_pid(old_pid); - put_cred(old_cred); } static int unix_listen(struct socket *sock, int backlog) @@ -1528,51 +1503,78 @@ out: return err; } -static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) +static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) +{ + int i; + + scm->fp = UNIXCB(skb).fp; + UNIXCB(skb).fp = NULL; + + for (i = scm->fp->count-1; i >= 0; i--) + unix_notinflight(scm->fp->user, scm->fp->fp[i]); +} + +static void unix_destruct_scm(struct sk_buff *skb) +{ + struct scm_cookie scm; + memset(&scm, 0, sizeof(scm)); + scm.pid = UNIXCB(skb).pid; + if (UNIXCB(skb).fp) + unix_detach_fds(&scm, skb); + + /* Alas, it calls VFS */ + /* So fscking what? 
fput() had been SMP-safe since the last Summer */ + scm_destroy(&scm); + sock_wfree(skb); +} + +/* + * The "user->unix_inflight" variable is protected by the garbage + * collection lock, and we just read it locklessly here. If you go + * over the limit, there might be a tiny race in actually noticing + * it across threads. Tough. + */ +static inline bool too_many_unix_fds(struct task_struct *p) +{ + struct user_struct *user = current_user(); + + if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE))) + return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); + return false; +} + +#define MAX_RECURSION_LEVEL 4 + +static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) { - scm->fp = scm_fp_dup(UNIXCB(skb).fp); + int i; + unsigned char max_level = 0; + + if (too_many_unix_fds(current)) + return -ETOOMANYREFS; + + for (i = scm->fp->count - 1; i >= 0; i--) { + struct sock *sk = unix_get_socket(scm->fp->fp[i]); + + if (sk) + max_level = max(max_level, + unix_sk(sk)->recursion_level); + } + if (unlikely(max_level > MAX_RECURSION_LEVEL)) + return -ETOOMANYREFS; /* - * Garbage collection of unix sockets starts by selecting a set of - * candidate sockets which have reference only from being in flight - * (total_refs == inflight_refs). This condition is checked once during - * the candidate collection phase, and candidates are marked as such, so - * that non-candidates can later be ignored. While inflight_refs is - * protected by unix_gc_lock, total_refs (file count) is not, hence this - * is an instantaneous decision. - * - * Once a candidate, however, the socket must not be reinstalled into a - * file descriptor while the garbage collection is in progress. - * - * If the above conditions are met, then the directed graph of - * candidates (*) does not change while unix_gc_lock is held. 
- * - * Any operations that changes the file count through file descriptors - * (dup, close, sendmsg) does not change the graph since candidates are - * not installed in fds. - * - * Dequeing a candidate via recvmsg would install it into an fd, but - * that takes unix_gc_lock to decrement the inflight count, so it's - * serialized with garbage collection. - * - * MSG_PEEK is special in that it does not change the inflight count, - * yet does install the socket into an fd. The following lock/unlock - * pair is to ensure serialization with garbage collection. It must be - * done between incrementing the file count and installing the file into - * an fd. - * - * If garbage collection starts after the barrier provided by the - * lock/unlock, then it will see the elevated refcount and not mark this - * as a candidate. If a garbage collection is already in progress - * before the file count was incremented, then the lock/unlock pair will - * ensure that garbage collection is finished before progressing to - * installing the fd. - * - * (*) A -> B where B is on the queue of A or B is on the queue of C - * which is on the queue of listening socket A. + * Need to duplicate file references for the sake of garbage + * collection. Otherwise a socket in the fps might become a + * candidate for GC while the skb is not yet queued. */ - spin_lock(&unix_gc_lock); - spin_unlock(&unix_gc_lock); + UNIXCB(skb).fp = scm_fp_dup(scm->fp); + if (!UNIXCB(skb).fp) + return -ENOMEM; + + for (i = scm->fp->count - 1; i >= 0; i--) + unix_inflight(scm->fp->user, scm->fp->fp[i]); + return max_level; } static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) @@ -2200,7 +2202,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, sk_peek_offset_fwd(sk, size); if (UNIXCB(skb).fp) - unix_peek_fds(&scm, skb); + scm.fp = scm_fp_dup(UNIXCB(skb).fp); } err = (flags & MSG_TRUNC) ? 
skb->len - skip : size; @@ -2445,7 +2447,7 @@ unlock: /* It is questionable, see note in unix_dgram_recvmsg. */ if (UNIXCB(skb).fp) - unix_peek_fds(&scm, skb); + scm.fp = scm_fp_dup(UNIXCB(skb).fp); sk_peek_offset_fwd(sk, chunk); @@ -2722,7 +2724,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock, other = unix_peer(sk); if (other && unix_peer(other) != sk && - unix_recvq_full_lockless(other) && + unix_recvq_full(other) && unix_dgram_peer_wake_me(sk, other)) writable = 0; diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 4d283e26d816..c36757e72844 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -86,13 +86,77 @@ #include #include -#include "scm.h" - /* Internal data structures and random procedures: */ +static LIST_HEAD(gc_inflight_list); static LIST_HEAD(gc_candidates); +static DEFINE_SPINLOCK(unix_gc_lock); static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); +unsigned int unix_tot_inflight; + +struct sock *unix_get_socket(struct file *filp) +{ + struct sock *u_sock = NULL; + struct inode *inode = file_inode(filp); + + /* Socket ? */ + if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { + struct socket *sock = SOCKET_I(inode); + struct sock *s = sock->sk; + + /* PF_UNIX ? */ + if (s && sock->ops && sock->ops->family == PF_UNIX) + u_sock = s; + } + return u_sock; +} + +/* Keep the number of times in flight count for the file + * descriptor if it is for an AF_UNIX socket. 
+ */ + +void unix_inflight(struct user_struct *user, struct file *fp) +{ + struct sock *s = unix_get_socket(fp); + + spin_lock(&unix_gc_lock); + + if (s) { + struct unix_sock *u = unix_sk(s); + + if (atomic_long_inc_return(&u->inflight) == 1) { + BUG_ON(!list_empty(&u->link)); + list_add_tail(&u->link, &gc_inflight_list); + } else { + BUG_ON(list_empty(&u->link)); + } + unix_tot_inflight++; + } + user->unix_inflight++; + spin_unlock(&unix_gc_lock); +} + +void unix_notinflight(struct user_struct *user, struct file *fp) +{ + struct sock *s = unix_get_socket(fp); + + spin_lock(&unix_gc_lock); + + if (s) { + struct unix_sock *u = unix_sk(s); + + BUG_ON(!atomic_long_read(&u->inflight)); + BUG_ON(list_empty(&u->link)); + + if (atomic_long_dec_and_test(&u->inflight)) + list_del_init(&u->link); + unix_tot_inflight--; + } + user->unix_inflight--; + spin_unlock(&unix_gc_lock); +} + static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), struct sk_buff_head *hitlist) { @@ -197,11 +261,8 @@ void wait_for_unix_gc(void) { /* If number of inflight sockets is insane, * force a garbage collect right now. - * Paired with the WRITE_ONCE() in unix_inflight(), - * unix_notinflight() and gc_in_progress(). */ - if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC && - !READ_ONCE(gc_in_progress)) + if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress) unix_gc(); wait_event(unix_gc_wait, gc_in_progress == false); } @@ -221,9 +282,7 @@ void unix_gc(void) if (gc_in_progress) goto out; - /* Paired with READ_ONCE() in wait_for_unix_gc(). */ - WRITE_ONCE(gc_in_progress, true); - + gc_in_progress = true; /* First, select candidates for garbage collection. Only * in-flight sockets are considered, and from those only ones * which don't have any external reference. @@ -309,10 +368,7 @@ void unix_gc(void) /* All candidates should have been detached by now. */ BUG_ON(!list_empty(&gc_candidates)); - - /* Paired with READ_ONCE() in wait_for_unix_gc(). 
*/ - WRITE_ONCE(gc_in_progress, false); - + gc_in_progress = false; wake_up(&unix_gc_wait); out: diff --git a/net/unix/scm.c b/net/unix/scm.c deleted file mode 100644 index bf1a8fa8c4f1..000000000000 --- a/net/unix/scm.c +++ /dev/null @@ -1,163 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "scm.h" - -unsigned int unix_tot_inflight; -EXPORT_SYMBOL(unix_tot_inflight); - -LIST_HEAD(gc_inflight_list); -EXPORT_SYMBOL(gc_inflight_list); - -DEFINE_SPINLOCK(unix_gc_lock); -EXPORT_SYMBOL(unix_gc_lock); - -struct sock *unix_get_socket(struct file *filp) -{ - struct sock *u_sock = NULL; - struct inode *inode = file_inode(filp); - - /* Socket ? */ - if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { - struct socket *sock = SOCKET_I(inode); - struct sock *s = sock->sk; - - /* PF_UNIX ? */ - if (s && sock->ops && sock->ops->family == PF_UNIX) - u_sock = s; - } - return u_sock; -} -EXPORT_SYMBOL(unix_get_socket); - -/* Keep the number of times in flight count for the file - * descriptor if it is for an AF_UNIX socket. 
- */ -void unix_inflight(struct user_struct *user, struct file *fp) -{ - struct sock *s = unix_get_socket(fp); - - spin_lock(&unix_gc_lock); - - if (s) { - struct unix_sock *u = unix_sk(s); - - if (atomic_long_inc_return(&u->inflight) == 1) { - BUG_ON(!list_empty(&u->link)); - list_add_tail(&u->link, &gc_inflight_list); - } else { - BUG_ON(list_empty(&u->link)); - } - /* Paired with READ_ONCE() in wait_for_unix_gc() */ - WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1); - } - user->unix_inflight++; - spin_unlock(&unix_gc_lock); -} - -void unix_notinflight(struct user_struct *user, struct file *fp) -{ - struct sock *s = unix_get_socket(fp); - - spin_lock(&unix_gc_lock); - - if (s) { - struct unix_sock *u = unix_sk(s); - - BUG_ON(!atomic_long_read(&u->inflight)); - BUG_ON(list_empty(&u->link)); - - if (atomic_long_dec_and_test(&u->inflight)) - list_del_init(&u->link); - /* Paired with READ_ONCE() in wait_for_unix_gc() */ - WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1); - } - user->unix_inflight--; - spin_unlock(&unix_gc_lock); -} - -/* - * The "user->unix_inflight" variable is protected by the garbage - * collection lock, and we just read it locklessly here. If you go - * over the limit, there might be a tiny race in actually noticing - * it across threads. Tough. 
- */ -static inline bool too_many_unix_fds(struct task_struct *p) -{ - struct user_struct *user = current_user(); - - if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE))) - return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); - return false; -} - -#define MAX_RECURSION_LEVEL 4 - -int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) -{ - int i; - unsigned char max_level = 0; - - if (too_many_unix_fds(current)) - return -ETOOMANYREFS; - - for (i = scm->fp->count - 1; i >= 0; i--) { - struct sock *sk = unix_get_socket(scm->fp->fp[i]); - - if (sk) - max_level = max(max_level, - unix_sk(sk)->recursion_level); - } - if (unlikely(max_level > MAX_RECURSION_LEVEL)) - return -ETOOMANYREFS; - - /* - * Need to duplicate file references for the sake of garbage - * collection. Otherwise a socket in the fps might become a - * candidate for GC while the skb is not yet queued. - */ - UNIXCB(skb).fp = scm_fp_dup(scm->fp); - if (!UNIXCB(skb).fp) - return -ENOMEM; - - for (i = scm->fp->count - 1; i >= 0; i--) - unix_inflight(scm->fp->user, scm->fp->fp[i]); - return max_level; -} -EXPORT_SYMBOL(unix_attach_fds); - -void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) -{ - int i; - - scm->fp = UNIXCB(skb).fp; - UNIXCB(skb).fp = NULL; - - for (i = scm->fp->count-1; i >= 0; i--) - unix_notinflight(scm->fp->user, scm->fp->fp[i]); -} -EXPORT_SYMBOL(unix_detach_fds); - -void unix_destruct_scm(struct sk_buff *skb) -{ - struct scm_cookie scm; - - memset(&scm, 0, sizeof(scm)); - scm.pid = UNIXCB(skb).pid; - if (UNIXCB(skb).fp) - unix_detach_fds(&scm, skb); - - /* Alas, it calls VFS */ - /* So fscking what? 
fput() had been SMP-safe since the last Summer */ - scm_destroy(&scm); - sock_wfree(skb); -} -EXPORT_SYMBOL(unix_destruct_scm); diff --git a/net/unix/scm.h b/net/unix/scm.h deleted file mode 100644 index 5a255a477f16..000000000000 --- a/net/unix/scm.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef NET_UNIX_SCM_H -#define NET_UNIX_SCM_H - -extern struct list_head gc_inflight_list; -extern spinlock_t unix_gc_lock; - -int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb); -void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb); - -#endif diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index baab5f65fbeb..3a2543b9701a 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -650,7 +650,6 @@ struct sock *__vsock_create(struct net *net, vsk->trusted = psk->trusted; vsk->owner = get_cred(psk->owner); vsk->connect_timeout = psk->connect_timeout; - security_sk_clone(parent, sk); } else { vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN); vsk->owner = get_current_cred(); @@ -831,12 +830,10 @@ static int vsock_shutdown(struct socket *sock, int mode) */ sk = sock->sk; - - lock_sock(sk); if (sock->state == SS_UNCONNECTED) { err = -ENOTCONN; if (sk->sk_type == SOCK_STREAM) - goto out; + return err; } else { sock->state = SS_DISCONNECTING; err = 0; @@ -845,8 +842,10 @@ static int vsock_shutdown(struct socket *sock, int mode) /* Receive and send shutdowns are treated alike. */ mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN); if (mode) { + lock_sock(sk); sk->sk_shutdown |= mode; sk->sk_state_change(sk); + release_sock(sk); if (sk->sk_type == SOCK_STREAM) { sock_reset_flag(sk, SOCK_DONE); @@ -854,8 +853,6 @@ static int vsock_shutdown(struct socket *sock, int mode) } } -out: - release_sock(sk); return err; } @@ -1176,8 +1173,6 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, * non-blocking call. 
*/ err = -EALREADY; - if (flags & O_NONBLOCK) - goto out; break; default: if ((sk->sk_state == VSOCK_SS_LISTEN) || diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index ddcae46ae408..9c07c76c504d 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -25,10 +25,6 @@ /* How long to wait for graceful shutdown of a connection */ #define VSOCK_CLOSE_TIMEOUT (8 * HZ) -uint virtio_transport_max_vsock_pkt_buf_size = 64 * 1024; -module_param(virtio_transport_max_vsock_pkt_buf_size, uint, 0444); -EXPORT_SYMBOL_GPL(virtio_transport_max_vsock_pkt_buf_size); - static const struct virtio_transport *virtio_transport_get_ops(void) { const struct vsock_transport *t = vsock_core_get_transport(); diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index c09efcdf72d2..102bf9194662 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -593,7 +593,8 @@ vmci_transport_queue_pair_alloc(struct vmci_qp **qpair, peer, flags, VMCI_NO_PRIVILEGE_FLAGS); out: if (err < 0) { - pr_err_once("Could not attach to queue pair with %d\n", err); + pr_err("Could not attach to queue pair with %d\n", + err); err = vmci_transport_error_to_vsock_error(err); } diff --git a/net/wireguard/Makefile b/net/wireguard/Makefile index c17546eaeedc..42795ac08f6b 100644 --- a/net/wireguard/Makefile +++ b/net/wireguard/Makefile @@ -2,9 +2,10 @@ # # Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' -ccflags-y += -Wframe-larger-than=2048 +ccflags-y := -O3 ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG -g +ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' +ccflags-y += -Wframe-larger-than=2048 ccflags-$(if $(WIREGUARD_VERSION),y,) += -D'WIREGUARD_VERSION="$(WIREGUARD_VERSION)"' wireguard-y := main.o noise.o device.o peer.o timers.o queueing.o send.o receive.o socket.o peerlookup.o allowedips.o ratelimiter.o cookie.o netlink.o diff --git a/net/wireguard/allowedips.c b/net/wireguard/allowedips.c index 9a4c8ff32d9d..23c2285c2f72 100644 --- a/net/wireguard/allowedips.c +++ b/net/wireguard/allowedips.c @@ -6,8 +6,6 @@ #include "allowedips.h" #include "peer.h" -static struct kmem_cache *node_cache; - static void swap_endian(u8 *dst, const u8 *src, u8 bits) { if (bits == 32) { @@ -30,10 +28,12 @@ static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src, node->bitlen = bits; memcpy(node->bits, src, bits / 8U); } +#define CHOOSE_NODE(parent, key) \ + parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1] -static inline u8 choose(struct allowedips_node *node, const u8 *key) +static void node_free_rcu(struct rcu_head *rcu) { - return (key[node->bit_at_a] >> node->bit_at_b) & 1; + kfree(container_of(rcu, struct allowedips_node, rcu)); } static void push_rcu(struct allowedips_node **stack, @@ -45,11 +45,6 @@ static void push_rcu(struct allowedips_node **stack, } } -static void node_free_rcu(struct rcu_head *rcu) -{ - kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu)); -} - static void root_free_rcu(struct rcu_head *rcu) { struct allowedips_node *node, *stack[128] = { @@ -59,7 +54,7 @@ static void root_free_rcu(struct rcu_head *rcu) while (len > 0 && (node = stack[--len])) { push_rcu(stack, node->bit[0], &len); push_rcu(stack, node->bit[1], &len); - kmem_cache_free(node_cache, node); + kfree(node); } } @@ -76,6 +71,60 @@ static void root_remove_peer_lists(struct 
allowedips_node *root) } } +static void walk_remove_by_peer(struct allowedips_node __rcu **top, + struct wg_peer *peer, struct mutex *lock) +{ +#define REF(p) rcu_access_pointer(p) +#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock)) +#define PUSH(p) ({ \ + WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \ + stack[len++] = p; \ + }) + + struct allowedips_node __rcu **stack[128], **nptr; + struct allowedips_node *node, *prev; + unsigned int len; + + if (unlikely(!peer || !REF(*top))) + return; + + for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) { + nptr = stack[len - 1]; + node = DEREF(nptr); + if (!node) { + --len; + continue; + } + if (!prev || REF(prev->bit[0]) == node || + REF(prev->bit[1]) == node) { + if (REF(node->bit[0])) + PUSH(&node->bit[0]); + else if (REF(node->bit[1])) + PUSH(&node->bit[1]); + } else if (REF(node->bit[0]) == prev) { + if (REF(node->bit[1])) + PUSH(&node->bit[1]); + } else { + if (rcu_dereference_protected(node->peer, + lockdep_is_held(lock)) == peer) { + RCU_INIT_POINTER(node->peer, NULL); + list_del_init(&node->peer_list); + if (!node->bit[0] || !node->bit[1]) { + rcu_assign_pointer(*nptr, DEREF( + &node->bit[!REF(node->bit[0])])); + call_rcu(&node->rcu, node_free_rcu); + node = DEREF(nptr); + } + } + --len; + } + } + +#undef REF +#undef DEREF +#undef PUSH +} + static unsigned int fls128(u64 a, u64 b) { return a ? 
fls64(a) + 64U : fls64(b); @@ -115,7 +164,7 @@ static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits, found = node; if (node->cidr == bits) break; - node = rcu_dereference_bh(node->bit[choose(node, key)]); + node = rcu_dereference_bh(CHOOSE_NODE(node, key)); } return found; } @@ -147,7 +196,8 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, u8 cidr, u8 bits, struct allowedips_node **rnode, struct mutex *lock) { - struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock)); + struct allowedips_node *node = rcu_dereference_protected(trie, + lockdep_is_held(lock)); struct allowedips_node *parent = NULL; bool exact = false; @@ -157,24 +207,13 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, exact = true; break; } - node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock)); + node = rcu_dereference_protected(CHOOSE_NODE(parent, key), + lockdep_is_held(lock)); } *rnode = parent; return exact; } -static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node) -{ - node->parent_bit_packed = (unsigned long)parent | bit; - rcu_assign_pointer(*parent, node); -} - -static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node) -{ - u8 bit = choose(parent, node->bits); - connect_node(&parent->bit[bit], bit, node); -} - static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, u8 cidr, struct wg_peer *peer, struct mutex *lock) { @@ -184,13 +223,13 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, return -EINVAL; if (!rcu_access_pointer(*trie)) { - node = kmem_cache_zalloc(node_cache, GFP_KERNEL); + node = kzalloc(sizeof(*node), GFP_KERNEL); if (unlikely(!node)) return -ENOMEM; RCU_INIT_POINTER(node->peer, peer); list_add_tail(&node->peer_list, &peer->allowedips_list); copy_and_assign_cidr(node, key, 
cidr, bits); - connect_node(trie, 2, node); + rcu_assign_pointer(*trie, node); return 0; } if (node_placement(*trie, key, cidr, bits, &node, lock)) { @@ -199,7 +238,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, return 0; } - newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL); + newnode = kzalloc(sizeof(*newnode), GFP_KERNEL); if (unlikely(!newnode)) return -ENOMEM; RCU_INIT_POINTER(newnode->peer, peer); @@ -209,10 +248,10 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, if (!node) { down = rcu_dereference_protected(*trie, lockdep_is_held(lock)); } else { - const u8 bit = choose(node, key); - down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock)); + down = rcu_dereference_protected(CHOOSE_NODE(node, key), + lockdep_is_held(lock)); if (!down) { - connect_node(&node->bit[bit], bit, newnode); + rcu_assign_pointer(CHOOSE_NODE(node, key), newnode); return 0; } } @@ -220,29 +259,30 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, parent = node; if (newnode->cidr == cidr) { - choose_and_connect_node(newnode, down); + rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down); if (!parent) - connect_node(trie, 2, newnode); + rcu_assign_pointer(*trie, newnode); else - choose_and_connect_node(parent, newnode); - return 0; - } + rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits), + newnode); + } else { + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (unlikely(!node)) { + list_del(&newnode->peer_list); + kfree(newnode); + return -ENOMEM; + } + INIT_LIST_HEAD(&node->peer_list); + copy_and_assign_cidr(node, newnode->bits, cidr, bits); - node = kmem_cache_zalloc(node_cache, GFP_KERNEL); - if (unlikely(!node)) { - list_del(&newnode->peer_list); - kmem_cache_free(node_cache, newnode); - return -ENOMEM; + rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down); + rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode); + if (!parent) + rcu_assign_pointer(*trie, 
node); + else + rcu_assign_pointer(CHOOSE_NODE(parent, node->bits), + node); } - INIT_LIST_HEAD(&node->peer_list); - copy_and_assign_cidr(node, newnode->bits, cidr, bits); - - choose_and_connect_node(node, down); - choose_and_connect_node(node, newnode); - if (!parent) - connect_node(trie, 2, node); - else - choose_and_connect_node(parent, node); return 0; } @@ -300,41 +340,9 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip, void wg_allowedips_remove_by_peer(struct allowedips *table, struct wg_peer *peer, struct mutex *lock) { - struct allowedips_node *node, *child, **parent_bit, *parent, *tmp; - bool free_parent; - - if (list_empty(&peer->allowedips_list)) - return; ++table->seq; - list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) { - list_del_init(&node->peer_list); - RCU_INIT_POINTER(node->peer, NULL); - if (node->bit[0] && node->bit[1]) - continue; - child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])], - lockdep_is_held(lock)); - if (child) - child->parent_bit_packed = node->parent_bit_packed; - parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL); - *parent_bit = child; - parent = (void *)parent_bit - - offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]); - free_parent = !rcu_access_pointer(node->bit[0]) && - !rcu_access_pointer(node->bit[1]) && - (node->parent_bit_packed & 3) <= 1 && - !rcu_access_pointer(parent->peer); - if (free_parent) - child = rcu_dereference_protected( - parent->bit[!(node->parent_bit_packed & 1)], - lockdep_is_held(lock)); - call_rcu(&node->rcu, node_free_rcu); - if (!free_parent) - continue; - if (child) - child->parent_bit_packed = parent->parent_bit_packed; - *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child; - call_rcu(&parent->rcu, node_free_rcu); - } + walk_remove_by_peer(&table->root4, peer, lock); + walk_remove_by_peer(&table->root6, peer, lock); } int wg_allowedips_read_node(struct 
allowedips_node *node, u8 ip[16], u8 *cidr) @@ -371,16 +379,4 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, return NULL; } -int __init wg_allowedips_slab_init(void) -{ - node_cache = KMEM_CACHE(allowedips_node, 0); - return node_cache ? 0 : -ENOMEM; -} - -void wg_allowedips_slab_uninit(void) -{ - rcu_barrier(); - kmem_cache_destroy(node_cache); -} - #include "selftest/allowedips.c" diff --git a/net/wireguard/allowedips.h b/net/wireguard/allowedips.h index 2346c797eb4d..e5c83cafcef4 100644 --- a/net/wireguard/allowedips.h +++ b/net/wireguard/allowedips.h @@ -15,11 +15,14 @@ struct wg_peer; struct allowedips_node { struct wg_peer __rcu *peer; struct allowedips_node __rcu *bit[2]; - u8 cidr, bit_at_a, bit_at_b, bitlen; + /* While it may seem scandalous that we waste space for v4, + * we're alloc'ing to the nearest power of 2 anyway, so this + * doesn't actually make a difference. + */ u8 bits[16] __aligned(__alignof(u64)); + u8 cidr, bit_at_a, bit_at_b, bitlen; - /* Keep rarely used members at bottom to be beyond cache line. */ - unsigned long parent_bit_packed; + /* Keep rarely used list at bottom to be beyond cache line. */ union { struct list_head peer_list; struct rcu_head rcu; @@ -30,7 +33,7 @@ struct allowedips { struct allowedips_node __rcu *root4; struct allowedips_node __rcu *root6; u64 seq; -} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. 
*/ +}; void wg_allowedips_init(struct allowedips *table); void wg_allowedips_free(struct allowedips *table, struct mutex *mutex); @@ -53,7 +56,4 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, bool wg_allowedips_selftest(void); #endif -int wg_allowedips_slab_init(void); -void wg_allowedips_slab_uninit(void); - #endif /* _WG_ALLOWEDIPS_H */ diff --git a/net/wireguard/compat/Makefile.include b/net/wireguard/compat/Makefile.include index a75d5b7e9f82..513dba444a37 100644 --- a/net/wireguard/compat/Makefile.include +++ b/net/wireguard/compat/Makefile.include @@ -6,7 +6,6 @@ kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src)) ccflags-y += -include $(kbuild-dir)/compat/compat.h asflags-y += -include $(kbuild-dir)/compat/compat-asm.h -LINUXINCLUDE := -DCOMPAT_VERSION=$(VERSION) -DCOMPAT_PATCHLEVEL=$(PATCHLEVEL) -DCOMPAT_SUBLEVEL=$(SUBLEVEL) -I$(kbuild-dir)/compat/version $(LINUXINCLUDE) ifeq ($(wildcard $(srctree)/include/linux/ptr_ring.h),) ccflags-y += -I$(kbuild-dir)/compat/ptr_ring/include diff --git a/net/wireguard/compat/compat-asm.h b/net/wireguard/compat/compat-asm.h index 951fc1094470..4e427e50e9c6 100644 --- a/net/wireguard/compat/compat-asm.h +++ b/net/wireguard/compat/compat-asm.h @@ -10,19 +10,8 @@ #include #include -#ifdef RHEL_MAJOR -#if RHEL_MAJOR == 7 -#define ISRHEL7 -#elif RHEL_MAJOR == 8 -#define ISRHEL8 -#if RHEL_MINOR >= 6 -#define ISCENTOS8S -#endif -#endif -#endif - /* PaX compatibility */ -#if defined(RAP_PLUGIN) && defined(RAP_ENTRY) +#if defined(RAP_PLUGIN) #undef ENTRY #define ENTRY RAP_ENTRY #endif @@ -51,7 +40,7 @@ #undef pull #endif -#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 76) && !defined(ISRHEL8) && !defined(SYM_FUNC_START) +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 76) #define SYM_FUNC_START ENTRY #define SYM_FUNC_END ENDPROC #endif diff --git a/net/wireguard/compat/compat.h b/net/wireguard/compat/compat.h index cd1894e90423..9f6e725db531 100644 --- a/net/wireguard/compat/compat.h +++ 
b/net/wireguard/compat/compat.h @@ -14,10 +14,13 @@ #ifdef RHEL_MAJOR #if RHEL_MAJOR == 7 #define ISRHEL7 +#if RHEL_MINOR == 8 +#define ISCENTOS7 +#endif #elif RHEL_MAJOR == 8 #define ISRHEL8 -#if RHEL_MINOR >= 6 -#define ISCENTOS8S +#if RHEL_MINOR == 2 +#define ISCENTOS8 #endif #endif #endif @@ -91,7 +94,7 @@ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 83) #define ipv6_dst_lookup_flow(a, b, c, d) ipv6_dst_lookup_flow(b, c, d) -#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 5) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 18) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) && !defined(ISUBUNTU1904)) || (!defined(ISRHEL8) && !defined(ISDEBIAN) && !defined(ISUBUNTU1804) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 119) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 181) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 224) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 224) && !defined(ISUBUNTU1604) && !defined(ISRHEL7)) +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 5) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 18) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) && !defined(ISUBUNTU1904)) || (!defined(ISRHEL8) && !defined(ISDEBIAN) && !defined(ISUBUNTU1804) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 119) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 181) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 224) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 224) && !defined(ISUBUNTU1604) && (!defined(ISRHEL7) || defined(ISCENTOS7))) #define ipv6_dst_lookup_flow(a, b, c, d) ipv6_dst_lookup(a, b, &dst, c) + (void 
*)0 ?: dst #endif @@ -515,28 +518,6 @@ static inline void __compat_kvfree(const void *addr) #define kvfree __compat_kvfree #endif -#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) -#include -#include -static inline void *__compat_kvmalloc_array(size_t n, size_t size, gfp_t flags) -{ - if (n != 0 && SIZE_MAX / n < size) - return NULL; - return kvmalloc(n * size, flags); -} -#define kvmalloc_array __compat_kvmalloc_array -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0) -#include -#include -static inline void *__compat_kvcalloc(size_t n, size_t size, gfp_t flags) -{ - return kvmalloc_array(n, size, flags | __GFP_ZERO); -} -#define kvcalloc __compat_kvcalloc -#endif - #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 9) #include #define priv_destructor destructor @@ -779,7 +760,7 @@ static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2, #define hlist_add_behind(a, b) hlist_add_after(b, a) #endif -#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) && !defined(ISRHEL8) +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) #define totalram_pages() totalram_pages #endif @@ -845,7 +826,7 @@ static __always_inline void old_rcu_barrier(void) #define COMPAT_CANNOT_DEPRECIATE_BH_RCU #endif -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 10) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) && !defined(ISRHEL8)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 217) +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 10) && !defined(ISRHEL8) static inline void skb_mark_not_on_list(struct sk_buff *skb) { skb->next = NULL; @@ -853,16 +834,10 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) && !defined(ISRHEL8) -#include -#ifndef NLA_POLICY_EXACT_LEN #define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_UNSPEC, .len = _len } #endif -#endif #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) && !defined(ISRHEL8) -#include -#ifndef NLA_POLICY_MIN_LEN #define NLA_POLICY_MIN_LEN(_len) { .type = NLA_UNSPEC, 
.len = _len } -#endif #define COMPAT_CANNOT_INDIVIDUAL_NETLINK_OPS_POLICY #endif @@ -877,7 +852,7 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb) #endif #endif -#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0) && !defined(ISRHEL8) +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0) #define genl_dumpit_info(cb) ({ \ struct { struct nlattr **attrs; } *a = (void *)((u8 *)cb->args + offsetofend(struct dump_ctx, next_allowedip)); \ BUILD_BUG_ON(sizeof(cb->args) < offsetofend(struct dump_ctx, next_allowedip) + sizeof(*a)); \ @@ -960,12 +935,12 @@ static inline int skb_ensure_writable(struct sk_buff *skb, int write_len) } #endif -#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 102) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 178) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 223) && LINUX_VERSION_CODE > KERNEL_VERSION(4, 10, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 259) || defined(ISRHEL8) || defined(ISUBUNTU1804) -#include -#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) #if IS_ENABLED(CONFIG_NF_NAT) #include +#include #include +#include #include #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && !defined(ISRHEL8) #include @@ -979,7 +954,6 @@ static inline void __compat_icmp_ndo_send(struct sk_buff *skb_in, int type, int ct = nf_ct_get(skb_in, &ctinfo); if (!ct || !(ct->status & IPS_SRC_NAT)) { - memset(skb_in->cb, 0, sizeof(skb_in->cb)); icmp_send(skb_in, type, code, info); return; } @@ -995,7 +969,6 @@ static inline void __compat_icmp_ndo_send(struct sk_buff *skb_in, int type, int orig_ip = ip_hdr(skb_in)->saddr; ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip; - memset(skb_in->cb, 0, sizeof(skb_in->cb)); icmp_send(skb_in, type, code, info); ip_hdr(skb_in)->saddr = orig_ip; out: @@ -1010,7 +983,6 @@ static inline void 
__compat_icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 ct = nf_ct_get(skb_in, &ctinfo); if (!ct || !(ct->status & IPS_SRC_NAT)) { - memset(skb_in->cb, 0, sizeof(skb_in->cb)); icmpv6_send(skb_in, type, code, info); return; } @@ -1026,23 +998,14 @@ static inline void __compat_icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 orig_ip = ipv6_hdr(skb_in)->saddr; ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6; - memset(skb_in->cb, 0, sizeof(skb_in->cb)); icmpv6_send(skb_in, type, code, info); ipv6_hdr(skb_in)->saddr = orig_ip; out: consume_skb(cloned_skb); } #else -static inline void __compat_icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) -{ - memset(skb_in->cb, 0, sizeof(skb_in->cb)); - icmp_send(skb_in, type, code, info); -} -static inline void __compat_icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) -{ - memset(skb_in->cb, 0, sizeof(skb_in->cb)); - icmpv6_send(skb_in, type, code, info); -} +#define __compat_icmp_ndo_send icmp_send +#define __compat_icmpv6_ndo_send icmpv6_send #endif #define icmp_ndo_send __compat_icmp_ndo_send #define icmpv6_ndo_send __compat_icmpv6_ndo_send @@ -1052,7 +1015,7 @@ static inline void __compat_icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 #define COMPAT_CANNOT_USE_MAX_MTU #endif -#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 14) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 29) && !defined(ISUBUNTU1910) && !defined(ISUBUNTU1904) && !defined(ISRHEL8)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 14) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 29) && !defined(ISUBUNTU1910) && !defined(ISUBUNTU1904) && (!defined(ISRHEL8) || defined(ISCENTOS8))) #include #include static inline void skb_reset_redirect(struct sk_buff *skb) @@ -1108,53 +1071,6 @@ static const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tun #define kfree_sensitive(a) kzfree(a) #endif 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) && !defined(ISRHEL7) -#define xchg_release xchg -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) && !defined(ISRHEL7) -#include -#ifndef smp_load_acquire -#define smp_load_acquire(p) \ -({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ - smp_mb(); \ - ___p1; \ -}) -#endif -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0) -#include -struct dst_cache_pcpu { - unsigned long refresh_ts; - struct dst_entry *dst; - u32 cookie; - union { - struct in_addr in_saddr; - struct in6_addr in6_saddr; - }; -}; -#define COMPAT_HAS_DEFINED_DST_CACHE_PCPU -static inline void dst_cache_reset_now(struct dst_cache *dst_cache) -{ - int i; - - if (!dst_cache->cache) - return; - - dst_cache->reset_ts = jiffies; - for_each_possible_cpu(i) { - struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i); - struct dst_entry *dst = idst->dst; - - idst->cookie = 0; - idst->dst = NULL; - dst_release(dst); - } -} -#endif - #if defined(ISUBUNTU1604) || defined(ISRHEL7) #include #ifndef _WG_LINUX_SIPHASH_H @@ -1186,7 +1102,7 @@ static inline void dst_cache_reset_now(struct dst_cache *dst_cache) #undef __read_mostly #define __read_mostly #endif -#if (defined(CONFIG_PAX) || defined(CONFIG_CFI_CLANG)) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +#if (defined(RAP_PLUGIN) || defined(CONFIG_CFI_CLANG)) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) #include #define wg_expired_retransmit_handshake(a) wg_expired_retransmit_handshake(unsigned long timer) #define wg_expired_send_keepalive(a) wg_expired_send_keepalive(unsigned long timer) diff --git a/net/wireguard/compat/dst_cache/dst_cache.c b/net/wireguard/compat/dst_cache/dst_cache.c index f74c43c550eb..7ec22f768a8f 100644 --- a/net/wireguard/compat/dst_cache/dst_cache.c +++ b/net/wireguard/compat/dst_cache/dst_cache.c @@ -27,7 +27,6 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt) #endif #include -#ifndef COMPAT_HAS_DEFINED_DST_CACHE_PCPU struct dst_cache_pcpu { 
unsigned long refresh_ts; struct dst_entry *dst; @@ -37,7 +36,6 @@ struct dst_cache_pcpu { struct in6_addr in6_saddr; }; }; -#endif static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache, struct dst_entry *dst, u32 cookie) diff --git a/net/wireguard/compat/simd/include/linux/simd.h b/net/wireguard/compat/simd/include/linux/simd.h index e7f2550320c7..c75c72471e9e 100644 --- a/net/wireguard/compat/simd/include/linux/simd.h +++ b/net/wireguard/compat/simd/include/linux/simd.h @@ -9,6 +9,7 @@ #include #include #if defined(CONFIG_X86_64) +#include #include #elif defined(CONFIG_KERNEL_MODE_NEON) #include @@ -24,7 +25,7 @@ typedef enum { static inline void simd_get(simd_context_t *ctx) { - *ctx = !IS_ENABLED(CONFIG_PREEMPT_RT) && !IS_ENABLED(CONFIG_PREEMPT_RT_BASE) && may_use_simd() ? HAVE_FULL_SIMD : HAVE_NO_SIMD; + *ctx = !IS_ENABLED(CONFIG_PREEMPT_RT_BASE) && may_use_simd() ? HAVE_FULL_SIMD : HAVE_NO_SIMD; } static inline void simd_put(simd_context_t *ctx) diff --git a/net/wireguard/compat/siphash/include/linux/siphash.h b/net/wireguard/compat/siphash/include/linux/siphash.h index 3b30b3c47778..1e5e337d15bf 100644 --- a/net/wireguard/compat/siphash/include/linux/siphash.h +++ b/net/wireguard/compat/siphash/include/linux/siphash.h @@ -22,7 +22,9 @@ typedef struct { } siphash_key_t; u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key); +#endif u64 siphash_1u64(const u64 a, const siphash_key_t *key); u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key); @@ -75,9 +77,10 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len, static inline u64 siphash(const void *data, size_t len, const siphash_key_t *key) { - if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || - !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if 
(!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT)) return __siphash_unaligned(data, len, key); +#endif return ___siphash_aligned(data, len, key); } @@ -88,8 +91,10 @@ typedef struct { u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key); +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key); +#endif u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key); u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key); @@ -125,9 +130,10 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len, static inline u32 hsiphash(const void *data, size_t len, const hsiphash_key_t *key) { - if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || - !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT)) return __hsiphash_unaligned(data, len, key); +#endif return ___hsiphash_aligned(data, len, key); } diff --git a/net/wireguard/compat/siphash/siphash.c b/net/wireguard/compat/siphash/siphash.c index 7dc72cb4a710..58855328e6e0 100644 --- a/net/wireguard/compat/siphash/siphash.c +++ b/net/wireguard/compat/siphash/siphash.c @@ -57,7 +57,6 @@ SIPROUND; \ return (v0 ^ v1) ^ (v2 ^ v3); -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -77,19 +76,19 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) bytemask_from_count(left))); #else switch (left) { - case 7: b |= ((u64)end[6]) << 48; fallthrough; - case 6: b |= ((u64)end[5]) << 40; fallthrough; - case 5: b |= ((u64)end[4]) << 32; fallthrough; + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; case 4: b |= le32_to_cpup(data); break; - case 3: b |= ((u64)end[2]) << 16; fallthrough; + case 
3: b |= ((u64)end[2]) << 16; case 2: b |= le16_to_cpup(data); break; case 1: b |= end[0]; } #endif POSTAMBLE } -#endif +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -109,17 +108,18 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) bytemask_from_count(left))); #else switch (left) { - case 7: b |= ((u64)end[6]) << 48; fallthrough; - case 6: b |= ((u64)end[5]) << 40; fallthrough; - case 5: b |= ((u64)end[4]) << 32; fallthrough; + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; case 4: b |= get_unaligned_le32(end); break; - case 3: b |= ((u64)end[2]) << 16; fallthrough; + case 3: b |= ((u64)end[2]) << 16; case 2: b |= get_unaligned_le16(end); break; case 1: b |= end[0]; } #endif POSTAMBLE } +#endif /** * siphash_1u64 - compute 64-bit siphash PRF value of a u64 @@ -250,7 +250,6 @@ u64 siphash_3u32(const u32 first, const u32 second, const u32 third, HSIPROUND; \ return (v0 ^ v1) ^ (v2 ^ v3); -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); @@ -269,19 +268,19 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) bytemask_from_count(left))); #else switch (left) { - case 7: b |= ((u64)end[6]) << 48; fallthrough; - case 6: b |= ((u64)end[5]) << 40; fallthrough; - case 5: b |= ((u64)end[4]) << 32; fallthrough; + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; case 4: b |= le32_to_cpup(data); break; - case 3: b |= ((u64)end[2]) << 16; fallthrough; + case 3: b |= ((u64)end[2]) << 16; case 2: b |= le16_to_cpup(data); break; case 1: b |= end[0]; } #endif HPOSTAMBLE } -#endif +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 
__hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key) { @@ -301,17 +300,18 @@ u32 __hsiphash_unaligned(const void *data, size_t len, bytemask_from_count(left))); #else switch (left) { - case 7: b |= ((u64)end[6]) << 48; fallthrough; - case 6: b |= ((u64)end[5]) << 40; fallthrough; - case 5: b |= ((u64)end[4]) << 32; fallthrough; + case 7: b |= ((u64)end[6]) << 48; + case 6: b |= ((u64)end[5]) << 40; + case 5: b |= ((u64)end[4]) << 32; case 4: b |= get_unaligned_le32(end); break; - case 3: b |= ((u64)end[2]) << 16; fallthrough; + case 3: b |= ((u64)end[2]) << 16; case 2: b |= get_unaligned_le16(end); break; case 1: b |= end[0]; } #endif HPOSTAMBLE } +#endif /** * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32 @@ -412,7 +412,6 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, HSIPROUND; \ return v1 ^ v3; -#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u32)); @@ -426,14 +425,14 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) v0 ^= m; } switch (left) { - case 3: b |= ((u32)end[2]) << 16; fallthrough; + case 3: b |= ((u32)end[2]) << 16; case 2: b |= le16_to_cpup(data); break; case 1: b |= end[0]; } HPOSTAMBLE } -#endif +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key) { @@ -448,12 +447,13 @@ u32 __hsiphash_unaligned(const void *data, size_t len, v0 ^= m; } switch (left) { - case 3: b |= ((u32)end[2]) << 16; fallthrough; + case 3: b |= ((u32)end[2]) << 16; case 2: b |= get_unaligned_le16(end); break; case 1: b |= end[0]; } HPOSTAMBLE } +#endif /** * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32 diff --git a/net/wireguard/compat/udp_tunnel/udp_tunnel.c b/net/wireguard/compat/udp_tunnel/udp_tunnel.c index d287b917be84..9b8770ae7b3f 100644 --- 
a/net/wireguard/compat/udp_tunnel/udp_tunnel.c +++ b/net/wireguard/compat/udp_tunnel/udp_tunnel.c @@ -38,10 +38,9 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, struct socket *sock = NULL; struct sockaddr_in udp_addr; - err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock); + err = __sock_create(net, AF_INET, SOCK_DGRAM, 0, &sock, 1); if (err < 0) goto error; - sk_change_net(sock->sk, net); udp_addr.sin_family = AF_INET; udp_addr.sin_addr = cfg->local_ip; @@ -73,7 +72,7 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, error: if (sock) { kernel_sock_shutdown(sock, SHUT_RDWR); - sk_release_kernel(sock->sk); + sock_release(sock); } *sockp = NULL; return err; @@ -230,7 +229,7 @@ void udp_tunnel_sock_release(struct socket *sock) { rcu_assign_sk_user_data(sock->sk, NULL); kernel_sock_shutdown(sock, SHUT_RDWR); - sk_release_kernel(sock->sk); + sock_release(sock); } #if IS_ENABLED(CONFIG_IPV6) @@ -255,10 +254,9 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, int err; struct socket *sock = NULL; - err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock); + err = __sock_create(net, AF_INET6, SOCK_DGRAM, 0, &sock, 1); if (err < 0) goto error; - sk_change_net(sock->sk, net); if (cfg->ipv6_v6only) { int val = 1; @@ -303,7 +301,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, error: if (sock) { kernel_sock_shutdown(sock, SHUT_RDWR); - sk_release_kernel(sock->sk); + sock_release(sock); } *sockp = NULL; return err; diff --git a/net/wireguard/compat/version/linux/version.h b/net/wireguard/compat/version/linux/version.h deleted file mode 100644 index 90988b37aed6..000000000000 --- a/net/wireguard/compat/version/linux/version.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2015-2021 Jason A. Donenfeld . All Rights Reserved. 
- */ - -#include_next -#undef KERNEL_VERSION -#define KERNEL_VERSION(a, b, c) (((a) << 24) + ((b) << 16) + (c)) -#undef LINUX_VERSION_CODE -#define LINUX_VERSION_CODE KERNEL_VERSION(COMPAT_VERSION, COMPAT_PATCHLEVEL, COMPAT_SUBLEVEL) diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c b/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c index f26ed5d897ac..79716c425b0c 100644 --- a/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c +++ b/net/wireguard/crypto/zinc/curve25519/curve25519-x86_64.c @@ -581,8 +581,8 @@ static inline void fsqr(u64 *out, const u64 *f, u64 *tmp) " cmovc %%rdx, %%rax;" " add %%rax, %%r8;" " movq %%r8, 0(%0);" - : "+&r,&r" (tmp), "+&r,&r" (f) - : "r,m" (out) + : "+&r" (tmp), "+&r" (f), "+&r" (out) + : : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "%r15", "memory", "cc" ); } @@ -743,8 +743,8 @@ static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp) " cmovc %%rdx, %%rax;" " add %%rax, %%r8;" " movq %%r8, 32(%0);" - : "+&r,&r" (tmp), "+&r,&r" (f) - : "r,m" (out) + : "+&r" (tmp), "+&r" (f), "+&r" (out) + : : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "%r15", "memory", "cc" ); } diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519.c b/net/wireguard/crypto/zinc/curve25519/curve25519.c index dffaa09c18db..d4066fe148af 100644 --- a/net/wireguard/crypto/zinc/curve25519/curve25519.c +++ b/net/wireguard/crypto/zinc/curve25519/curve25519.c @@ -13,6 +13,7 @@ #include "../selftest/run.h" #include +#include #include #include #include diff --git a/net/wireguard/device.c b/net/wireguard/device.c index ece4ad2db8b7..1155783a8f10 100644 --- a/net/wireguard/device.c +++ b/net/wireguard/device.c @@ -106,7 +106,6 @@ static int wg_stop(struct net_device *dev) { struct wg_device *wg = netdev_priv(dev); struct wg_peer *peer; - struct sk_buff *skb; mutex_lock(&wg->device_update_lock); list_for_each_entry(peer, &wg->peer_list, peer_list) { @@ -117,9 +116,7 @@ 
static int wg_stop(struct net_device *dev) wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); } mutex_unlock(&wg->device_update_lock); - while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL) - kfree_skb(skb); - atomic_set(&wg->handshake_queue_len, 0); + skb_queue_purge(&wg->incoming_handshakes); wg_socket_reinit(wg, NULL, NULL); return 0; } @@ -149,7 +146,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) else if (skb->protocol == htons(ETH_P_IPV6)) net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n", dev->name, &ipv6_hdr(skb)->daddr); - goto err_icmp; + goto err; } family = READ_ONCE(peer->endpoint.addr.sa_family); @@ -168,7 +165,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) } else { struct sk_buff *segs = skb_gso_segment(skb, 0); - if (IS_ERR(segs)) { + if (unlikely(IS_ERR(segs))) { ret = PTR_ERR(segs); goto err_peer; } @@ -212,13 +209,12 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) err_peer: wg_peer_put(peer); -err_icmp: +err: + ++dev->stats.tx_errors; if (skb->protocol == htons(ETH_P_IP)) icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); else if (skb->protocol == htons(ETH_P_IPV6)) icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); -err: - ++dev->stats.tx_errors; kfree_skb(skb); return ret; } @@ -246,13 +242,14 @@ static void wg_destruct(struct net_device *dev) destroy_workqueue(wg->handshake_receive_wq); destroy_workqueue(wg->handshake_send_wq); destroy_workqueue(wg->packet_crypt_wq); - wg_packet_queue_free(&wg->handshake_queue, true); - wg_packet_queue_free(&wg->decrypt_queue, false); - wg_packet_queue_free(&wg->encrypt_queue, false); + wg_packet_queue_free(&wg->decrypt_queue, true); + wg_packet_queue_free(&wg->encrypt_queue, true); rcu_barrier(); /* Wait for all the peers to be actually freed. 
*/ wg_ratelimiter_uninit(); memzero_explicit(&wg->static_identity, sizeof(wg->static_identity)); + skb_queue_purge(&wg->incoming_handshakes); free_percpu(dev->tstats); + free_percpu(wg->incoming_handshakes_worker); kvfree(wg->index_hashtable); kvfree(wg->peer_hashtable); mutex_unlock(&wg->device_update_lock); @@ -314,6 +311,7 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, init_rwsem(&wg->static_identity.lock); mutex_init(&wg->socket_update_lock); mutex_init(&wg->device_update_lock); + skb_queue_head_init(&wg->incoming_handshakes); wg_allowedips_init(&wg->peer_allowedips); wg_cookie_checker_init(&wg->cookie_checker, wg); INIT_LIST_HEAD(&wg->peer_list); @@ -331,10 +329,16 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, if (!dev->tstats) goto err_free_index_hashtable; + wg->incoming_handshakes_worker = + wg_packet_percpu_multicore_worker_alloc( + wg_packet_handshake_receive_worker, wg); + if (!wg->incoming_handshakes_worker) + goto err_free_tstats; + wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s", WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name); if (!wg->handshake_receive_wq) - goto err_free_tstats; + goto err_free_incoming_handshakes; wg->handshake_send_wq = alloc_workqueue("wg-kex-%s", WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name); @@ -347,23 +351,18 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, goto err_destroy_handshake_send; ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker, - MAX_QUEUED_PACKETS); + true, MAX_QUEUED_PACKETS); if (ret < 0) goto err_destroy_packet_crypt; ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker, - MAX_QUEUED_PACKETS); + true, MAX_QUEUED_PACKETS); if (ret < 0) goto err_free_encrypt_queue; - ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker, - MAX_QUEUED_INCOMING_HANDSHAKES); - if (ret < 0) - goto err_free_decrypt_queue; - ret = wg_ratelimiter_init(); if (ret < 0) - goto 
err_free_handshake_queue; + goto err_free_decrypt_queue; ret = register_netdevice(dev); if (ret < 0) @@ -381,18 +380,18 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, err_uninit_ratelimiter: wg_ratelimiter_uninit(); -err_free_handshake_queue: - wg_packet_queue_free(&wg->handshake_queue, false); err_free_decrypt_queue: - wg_packet_queue_free(&wg->decrypt_queue, false); + wg_packet_queue_free(&wg->decrypt_queue, true); err_free_encrypt_queue: - wg_packet_queue_free(&wg->encrypt_queue, false); + wg_packet_queue_free(&wg->encrypt_queue, true); err_destroy_packet_crypt: destroy_workqueue(wg->packet_crypt_wq); err_destroy_handshake_send: destroy_workqueue(wg->handshake_send_wq); err_destroy_handshake_receive: destroy_workqueue(wg->handshake_receive_wq); +err_free_incoming_handshakes: + free_percpu(wg->incoming_handshakes_worker); err_free_tstats: free_percpu(dev->tstats); err_free_index_hashtable: @@ -412,7 +411,6 @@ static struct rtnl_link_ops link_ops __read_mostly = { static void wg_netns_pre_exit(struct net *net) { struct wg_device *wg; - struct wg_peer *peer; rtnl_lock(); list_for_each_entry(wg, &device_list, device_list) { @@ -422,8 +420,6 @@ static void wg_netns_pre_exit(struct net *net) mutex_lock(&wg->device_update_lock); rcu_assign_pointer(wg->creating_net, NULL); wg_socket_reinit(wg, NULL, NULL); - list_for_each_entry(peer, &wg->peer_list, peer_list) - wg_socket_clear_peer_endpoint_src(peer); mutex_unlock(&wg->device_update_lock); } } diff --git a/net/wireguard/device.h b/net/wireguard/device.h index 43c7cebbf50b..4d0144e16947 100644 --- a/net/wireguard/device.h +++ b/net/wireguard/device.h @@ -27,30 +27,32 @@ struct multicore_worker { struct crypt_queue { struct ptr_ring ring; - struct multicore_worker __percpu *worker; - int last_cpu; -}; - -struct prev_queue { - struct sk_buff *head, *tail, *peeked; - struct { struct sk_buff *next, *prev; } empty; // Match first 2 members of struct sk_buff. 
- atomic_t count; + union { + struct { + struct multicore_worker __percpu *worker; + int last_cpu; + }; + struct work_struct work; + }; }; struct wg_device { struct net_device *dev; - struct crypt_queue encrypt_queue, decrypt_queue, handshake_queue; + struct crypt_queue encrypt_queue, decrypt_queue; struct sock __rcu *sock4, *sock6; struct net __rcu *creating_net; struct noise_static_identity static_identity; - struct workqueue_struct *packet_crypt_wq,*handshake_receive_wq, *handshake_send_wq; + struct workqueue_struct *handshake_receive_wq, *handshake_send_wq; + struct workqueue_struct *packet_crypt_wq; + struct sk_buff_head incoming_handshakes; + int incoming_handshake_cpu; + struct multicore_worker __percpu *incoming_handshakes_worker; struct cookie_checker cookie_checker; struct pubkey_hashtable *peer_hashtable; struct index_hashtable *index_hashtable; struct allowedips peer_allowedips; struct mutex device_update_lock, socket_update_lock; struct list_head device_list, peer_list; - atomic_t handshake_queue_len; unsigned int num_peers, device_update_gen; u32 fwmark; u16 incoming_port; diff --git a/net/wireguard/main.c b/net/wireguard/main.c index d5ce491e822e..543501158891 100644 --- a/net/wireguard/main.c +++ b/net/wireguard/main.c @@ -17,7 +17,7 @@ #include #include -static int __init wg_mod_init(void) +static int __init mod_init(void) { int ret; @@ -26,22 +26,13 @@ static int __init wg_mod_init(void) (ret = curve25519_mod_init())) return ret; - ret = wg_allowedips_slab_init(); - if (ret < 0) - goto err_allowedips; - #ifdef DEBUG - ret = -ENOTRECOVERABLE; if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() || !wg_ratelimiter_selftest()) - goto err_peer; + return -ENOTRECOVERABLE; #endif wg_noise_init(); - ret = wg_peer_init(); - if (ret < 0) - goto err_peer; - ret = wg_device_init(); if (ret < 0) goto err_device; @@ -58,23 +49,17 @@ static int __init wg_mod_init(void) err_netlink: wg_device_uninit(); err_device: - wg_peer_uninit(); -err_peer: - 
wg_allowedips_slab_uninit(); -err_allowedips: return ret; } -static void __exit wg_mod_exit(void) +static void __exit mod_exit(void) { wg_genetlink_uninit(); wg_device_uninit(); - wg_peer_uninit(); - wg_allowedips_slab_uninit(); } -module_init(wg_mod_init); -module_exit(wg_mod_exit); +module_init(mod_init); +module_exit(mod_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("WireGuard secure network tunnel"); MODULE_AUTHOR("Jason A. Donenfeld "); diff --git a/net/wireguard/peer.c b/net/wireguard/peer.c index 1acd00ab2fbc..b3b6370e6b95 100644 --- a/net/wireguard/peer.c +++ b/net/wireguard/peer.c @@ -15,7 +15,6 @@ #include #include -static struct kmem_cache *peer_cache; static atomic64_t peer_counter = ATOMIC64_INIT(0); struct wg_peer *wg_peer_create(struct wg_device *wg, @@ -30,25 +29,30 @@ struct wg_peer *wg_peer_create(struct wg_device *wg, if (wg->num_peers >= MAX_PEERS_PER_DEVICE) return ERR_PTR(ret); - peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL); + peer = kzalloc(sizeof(*peer), GFP_KERNEL); if (unlikely(!peer)) return ERR_PTR(ret); - if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))) - goto err; - peer->device = wg; + wg_noise_handshake_init(&peer->handshake, &wg->static_identity, public_key, preshared_key, peer); + if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) + goto err_1; + if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false, + MAX_QUEUED_PACKETS)) + goto err_2; + if (wg_packet_queue_init(&peer->rx_queue, NULL, false, + MAX_QUEUED_PACKETS)) + goto err_3; + peer->internal_id = atomic64_inc_return(&peer_counter); peer->serial_work_cpu = nr_cpumask_bits; wg_cookie_init(&peer->latest_cookie); wg_timers_init(peer); wg_cookie_checker_precompute_peer_keys(peer); spin_lock_init(&peer->keypairs.keypair_update_lock); - INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker); - INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker); - wg_prev_queue_init(&peer->tx_queue); - 
wg_prev_queue_init(&peer->rx_queue); + INIT_WORK(&peer->transmit_handshake_work, + wg_packet_handshake_send_worker); rwlock_init(&peer->endpoint_lock); kref_init(&peer->refcount); skb_queue_head_init(&peer->staged_packet_queue); @@ -64,8 +68,12 @@ struct wg_peer *wg_peer_create(struct wg_device *wg, pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id); return peer; -err: - kmem_cache_free(peer_cache, peer); +err_3: + wg_packet_queue_free(&peer->tx_queue, false); +err_2: + dst_cache_destroy(&peer->endpoint_cache); +err_1: + kfree(peer); return ERR_PTR(ret); } @@ -89,7 +97,7 @@ static void peer_make_dead(struct wg_peer *peer) /* Mark as dead, so that we don't allow jumping contexts after. */ WRITE_ONCE(peer->is_dead, true); - /* The caller must now synchronize_net() for this to take effect. */ + /* The caller must now synchronize_rcu() for this to take effect. */ } static void peer_remove_after_dead(struct wg_peer *peer) @@ -161,7 +169,7 @@ void wg_peer_remove(struct wg_peer *peer) lockdep_assert_held(&peer->device->device_update_lock); peer_make_dead(peer); - synchronize_net(); + synchronize_rcu(); peer_remove_after_dead(peer); } @@ -179,7 +187,7 @@ void wg_peer_remove_all(struct wg_device *wg) peer_make_dead(peer); list_add_tail(&peer->peer_list, &dead_peers); } - synchronize_net(); + synchronize_rcu(); list_for_each_entry_safe(peer, temp, &dead_peers, peer_list) peer_remove_after_dead(peer); } @@ -189,13 +197,13 @@ static void rcu_release(struct rcu_head *rcu) struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu); dst_cache_destroy(&peer->endpoint_cache); - WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue)); + wg_packet_queue_free(&peer->rx_queue, false); + wg_packet_queue_free(&peer->tx_queue, false); /* The final zeroing takes care of clearing any remaining handshake key * material and other potentially sensitive information. 
*/ - memzero_explicit(peer, sizeof(*peer)); - kmem_cache_free(peer_cache, peer); + kfree_sensitive(peer); } static void kref_release(struct kref *refcount) @@ -227,14 +235,3 @@ void wg_peer_put(struct wg_peer *peer) return; kref_put(&peer->refcount, kref_release); } - -int __init wg_peer_init(void) -{ - peer_cache = KMEM_CACHE(wg_peer, 0); - return peer_cache ? 0 : -ENOMEM; -} - -void wg_peer_uninit(void) -{ - kmem_cache_destroy(peer_cache); -} diff --git a/net/wireguard/peer.h b/net/wireguard/peer.h index 76e4d3128ad4..23af40922997 100644 --- a/net/wireguard/peer.h +++ b/net/wireguard/peer.h @@ -36,17 +36,16 @@ struct endpoint { struct wg_peer { struct wg_device *device; - struct prev_queue tx_queue, rx_queue; + struct crypt_queue tx_queue, rx_queue; struct sk_buff_head staged_packet_queue; int serial_work_cpu; - bool is_dead; struct noise_keypairs keypairs; struct endpoint endpoint; struct dst_cache endpoint_cache; rwlock_t endpoint_lock; struct noise_handshake handshake; atomic64_t last_sent_handshake; - struct work_struct transmit_handshake_work, clear_peer_work, transmit_packet_work; + struct work_struct transmit_handshake_work, clear_peer_work; struct cookie latest_cookie; struct hlist_node pubkey_hash; u64 rx_bytes, tx_bytes; @@ -62,8 +61,9 @@ struct wg_peer { struct rcu_head rcu; struct list_head peer_list; struct list_head allowedips_list; - struct napi_struct napi; u64 internal_id; + struct napi_struct napi; + bool is_dead; }; struct wg_peer *wg_peer_create(struct wg_device *wg, @@ -80,7 +80,4 @@ void wg_peer_put(struct wg_peer *peer); void wg_peer_remove(struct wg_peer *peer); void wg_peer_remove_all(struct wg_device *wg); -int wg_peer_init(void); -void wg_peer_uninit(void); - #endif /* _WG_PEER_H */ diff --git a/net/wireguard/queueing.c b/net/wireguard/queueing.c index 1de413b19e34..71b8e80b58e1 100644 --- a/net/wireguard/queueing.c +++ b/net/wireguard/queueing.c @@ -9,7 +9,8 @@ struct multicore_worker __percpu * 
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) { int cpu; - struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker); + struct multicore_worker __percpu *worker = + alloc_percpu(struct multicore_worker); if (!worker) return NULL; @@ -22,7 +23,7 @@ wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) } int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, - unsigned int len) + bool multicore, unsigned int len) { int ret; @@ -30,78 +31,25 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); if (ret) return ret; - queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue); - if (!queue->worker) { - ptr_ring_cleanup(&queue->ring, NULL); - return -ENOMEM; + if (function) { + if (multicore) { + queue->worker = wg_packet_percpu_multicore_worker_alloc( + function, queue); + if (!queue->worker) { + ptr_ring_cleanup(&queue->ring, NULL); + return -ENOMEM; + } + } else { + INIT_WORK(&queue->work, function); + } } return 0; } -void wg_packet_queue_free(struct crypt_queue *queue, bool purge) +void wg_packet_queue_free(struct crypt_queue *queue, bool multicore) { - free_percpu(queue->worker); - WARN_ON(!purge && !__ptr_ring_empty(&queue->ring)); - ptr_ring_cleanup(&queue->ring, purge ? 
(void(*)(void*))kfree_skb : NULL); + if (multicore) + free_percpu(queue->worker); + WARN_ON(!__ptr_ring_empty(&queue->ring)); + ptr_ring_cleanup(&queue->ring, NULL); } - -#define NEXT(skb) ((skb)->prev) -#define STUB(queue) ((struct sk_buff *)&queue->empty) - -void wg_prev_queue_init(struct prev_queue *queue) -{ - NEXT(STUB(queue)) = NULL; - queue->head = queue->tail = STUB(queue); - queue->peeked = NULL; - atomic_set(&queue->count, 0); - BUILD_BUG_ON( - offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) - - offsetof(struct prev_queue, empty) || - offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) - - offsetof(struct prev_queue, empty)); -} - -static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) -{ - WRITE_ONCE(NEXT(skb), NULL); - WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb); -} - -bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) -{ - if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS)) - return false; - __wg_prev_queue_enqueue(queue, skb); - return true; -} - -struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue) -{ - struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail)); - - if (tail == STUB(queue)) { - if (!next) - return NULL; - queue->tail = next; - tail = next; - next = smp_load_acquire(&NEXT(next)); - } - if (next) { - queue->tail = next; - atomic_dec(&queue->count); - return tail; - } - if (tail != READ_ONCE(queue->head)) - return NULL; - __wg_prev_queue_enqueue(queue, STUB(queue)); - next = smp_load_acquire(&NEXT(tail)); - if (next) { - queue->tail = next; - atomic_dec(&queue->count); - return tail; - } - return NULL; -} - -#undef NEXT -#undef STUB diff --git a/net/wireguard/queueing.h b/net/wireguard/queueing.h index 03850c43ebaf..bab170b95938 100644 --- a/net/wireguard/queueing.h +++ b/net/wireguard/queueing.h @@ -17,13 +17,12 @@ struct wg_device; struct wg_peer; struct multicore_worker; struct crypt_queue; 
-struct prev_queue; struct sk_buff; /* queueing.c APIs: */ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, - unsigned int len); -void wg_packet_queue_free(struct crypt_queue *queue, bool purge); + bool multicore, unsigned int len); +void wg_packet_queue_free(struct crypt_queue *queue, bool multicore); struct multicore_worker __percpu * wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr); @@ -139,31 +138,8 @@ static inline int wg_cpumask_next_online(int *next) return cpu; } -void wg_prev_queue_init(struct prev_queue *queue); - -/* Multi producer */ -bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb); - -/* Single consumer */ -struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue); - -/* Single consumer */ -static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue) -{ - if (queue->peeked) - return queue->peeked; - queue->peeked = wg_prev_queue_dequeue(queue); - return queue->peeked; -} - -/* Single consumer */ -static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue) -{ - queue->peeked = NULL; -} - static inline int wg_queue_enqueue_per_device_and_peer( - struct crypt_queue *device_queue, struct prev_queue *peer_queue, + struct crypt_queue *device_queue, struct crypt_queue *peer_queue, struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu) { int cpu; @@ -172,9 +148,8 @@ static inline int wg_queue_enqueue_per_device_and_peer( /* We first queue this up for the peer ingestion, but the consumer * will wait for the state to change to CRYPTED or DEAD before. */ - if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb))) + if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb))) return -ENOSPC; - /* Then we queue it up in the device queue, which consumes the * packet as soon as it can. 
*/ @@ -185,7 +160,9 @@ static inline int wg_queue_enqueue_per_device_and_peer( return 0; } -static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state) +static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue, + struct sk_buff *skb, + enum packet_state state) { /* We take a reference, because as soon as we call atomic_set, the * peer can be freed from below us. @@ -193,12 +170,14 @@ static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); atomic_set_release(&PACKET_CB(skb)->state, state); - queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), - peer->device->packet_crypt_wq, &peer->transmit_packet_work); + queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, + peer->internal_id), + peer->device->packet_crypt_wq, &queue->work); wg_peer_put(peer); } -static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state) +static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb, + enum packet_state state) { /* We take a reference, because as soon as we call atomic_set, the * peer can be freed from below us. 
diff --git a/net/wireguard/ratelimiter.c b/net/wireguard/ratelimiter.c index ecee41f528a5..e33ec72a9642 100644 --- a/net/wireguard/ratelimiter.c +++ b/net/wireguard/ratelimiter.c @@ -188,12 +188,12 @@ int wg_ratelimiter_init(void) (1U << 14) / sizeof(struct hlist_head))); max_entries = table_size * 8; - table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL); + table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL); if (unlikely(!table_v4)) goto err_kmemcache; #if IS_ENABLED(CONFIG_IPV6) - table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL); + table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL); if (unlikely(!table_v6)) { kvfree(table_v4); goto err_kmemcache; diff --git a/net/wireguard/receive.c b/net/wireguard/receive.c index 214889edb48e..172ef823d327 100644 --- a/net/wireguard/receive.c +++ b/net/wireguard/receive.c @@ -117,8 +117,8 @@ static void wg_receive_handshake_packet(struct wg_device *wg, return; } - under_load = atomic_read(&wg->handshake_queue_len) >= - MAX_QUEUED_INCOMING_HANDSHAKES / 8; + under_load = skb_queue_len(&wg->incoming_handshakes) >= + MAX_QUEUED_INCOMING_HANDSHAKES / 8; if (under_load) { last_under_load = ktime_get_coarse_boottime_ns(); } else if (last_under_load) { @@ -213,14 +213,13 @@ static void wg_receive_handshake_packet(struct wg_device *wg, void wg_packet_handshake_receive_worker(struct work_struct *work) { - struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr; - struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue); + struct wg_device *wg = container_of(work, struct multicore_worker, + work)->ptr; struct sk_buff *skb; - while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { + while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) { wg_receive_handshake_packet(wg, skb); dev_kfree_skb(skb); - atomic_dec(&wg->handshake_queue_len); cond_resched(); } } @@ -450,6 +449,7 @@ packet_processed: int wg_packet_rx_poll(struct napi_struct 
*napi, int budget) { struct wg_peer *peer = container_of(napi, struct wg_peer, napi); + struct crypt_queue *queue = &peer->rx_queue; struct noise_keypair *keypair; struct endpoint endpoint; enum packet_state state; @@ -460,10 +460,11 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget) if (unlikely(budget <= 0)) return 0; - while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL && + while ((skb = __ptr_ring_peek(&queue->ring)) != NULL && (state = atomic_read_acquire(&PACKET_CB(skb)->state)) != PACKET_STATE_UNCRYPTED) { - wg_prev_queue_drop_peeked(&peer->rx_queue); + __ptr_ring_discard_one(&queue->ring); + peer = PACKET_PEER(skb); keypair = PACKET_CB(skb)->keypair; free = true; @@ -515,7 +516,7 @@ void wg_packet_decrypt_worker(struct work_struct *work) likely(decrypt_packet(skb, PACKET_CB(skb)->keypair, &simd_context)) ? PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; - wg_queue_enqueue_per_peer_rx(skb, state); + wg_queue_enqueue_per_peer_napi(skb, state); simd_relax(&simd_context); } @@ -539,10 +540,12 @@ static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb) if (unlikely(READ_ONCE(peer->is_dead))) goto err; - ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb, - wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu); + ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, + &peer->rx_queue, skb, + wg->packet_crypt_wq, + &wg->decrypt_queue.last_cpu); if (unlikely(ret == -EPIPE)) - wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD); + wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD); if (likely(!ret || ret == -EPIPE)) { rcu_read_unlock_bh(); return; @@ -563,28 +566,22 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb) case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): { - int cpu, ret = -EBUSY; - - if (unlikely(!rng_is_initialized())) - goto drop; - if 
(atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) { - if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) { - ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb); - spin_unlock_bh(&wg->handshake_queue.ring.producer_lock); - } - } else - ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb); - if (ret) { - drop: + int cpu; + + if (skb_queue_len(&wg->incoming_handshakes) > + MAX_QUEUED_INCOMING_HANDSHAKES || + unlikely(!rng_is_initialized())) { net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n", wg->dev->name, skb); goto err; } - atomic_inc(&wg->handshake_queue_len); - cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu); - /* Queues up a call to packet_process_queued_handshake_packets(skb): */ + skb_queue_tail(&wg->incoming_handshakes, skb); + /* Queues up a call to packet_process_queued_handshake_ + * packets(skb): + */ + cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu); queue_work_on(cpu, wg->handshake_receive_wq, - &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work); + &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work); break; } case cpu_to_le32(MESSAGE_DATA): diff --git a/net/wireguard/selftest/allowedips.c b/net/wireguard/selftest/allowedips.c index e173204ae7d7..846db14cb046 100644 --- a/net/wireguard/selftest/allowedips.c +++ b/net/wireguard/selftest/allowedips.c @@ -19,22 +19,32 @@ #include +static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits, + u8 cidr) +{ + swap_endian(dst, src, bits); + memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8); + if (cidr) + dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8); +} + static __init void print_node(struct allowedips_node *node, u8 bits) { char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n"; - char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; - u8 ip1[16], ip2[16], cidr1, cidr2; + char *fmt_declaration = KERN_DEBUG + "\t\"%p/%d\"[style=%s, 
color=\"#%06x\"];\n"; char *style = "dotted"; + u8 ip1[16], ip2[16]; u32 color = 0; - if (node == NULL) - return; if (bits == 32) { fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n"; - fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; + fmt_declaration = KERN_DEBUG + "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; } else if (bits == 128) { fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n"; - fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; + fmt_declaration = KERN_DEBUG + "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; } if (node->peer) { hsiphash_key_t key = { { 0 } }; @@ -45,20 +55,24 @@ static __init void print_node(struct allowedips_node *node, u8 bits) hsiphash_1u32(0xabad1dea, &key) % 200; style = "bold"; } - wg_allowedips_read_node(node, ip1, &cidr1); - printk(fmt_declaration, ip1, cidr1, style, color); + swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr); + printk(fmt_declaration, ip1, node->cidr, style, color); if (node->bit[0]) { - wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2); - printk(fmt_connection, ip1, cidr1, ip2, cidr2); + swap_endian_and_apply_cidr(ip2, + rcu_dereference_raw(node->bit[0])->bits, bits, + node->cidr); + printk(fmt_connection, ip1, node->cidr, ip2, + rcu_dereference_raw(node->bit[0])->cidr); + print_node(rcu_dereference_raw(node->bit[0]), bits); } if (node->bit[1]) { - wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2); - printk(fmt_connection, ip1, cidr1, ip2, cidr2); - } - if (node->bit[0]) - print_node(rcu_dereference_raw(node->bit[0]), bits); - if (node->bit[1]) + swap_endian_and_apply_cidr(ip2, + rcu_dereference_raw(node->bit[1])->bits, + bits, node->cidr); + printk(fmt_connection, ip1, node->cidr, ip2, + rcu_dereference_raw(node->bit[1])->cidr); print_node(rcu_dereference_raw(node->bit[1]), bits); + } } static __init void print_tree(struct allowedips_node __rcu *top, u8 bits) @@ -107,8 +121,8 @@ 
static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr) { union nf_inet_addr mask; - memset(&mask, 0, sizeof(mask)); - memset(&mask.all, 0xff, cidr / 8); + memset(&mask, 0x00, 128 / 8); + memset(&mask, 0xff, cidr / 8); if (cidr % 32) mask.all[cidr / 32] = (__force u32)htonl( (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL); @@ -135,36 +149,42 @@ horrible_mask_self(struct horrible_allowedips_node *node) } static __init inline bool -horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip) +horrible_match_v4(const struct horrible_allowedips_node *node, + struct in_addr *ip) { return (ip->s_addr & node->mask.ip) == node->ip.ip; } static __init inline bool -horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip) +horrible_match_v6(const struct horrible_allowedips_node *node, + struct in6_addr *ip) { - return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] && - (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] && - (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] && + return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == + node->ip.ip6[0] && + (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == + node->ip.ip6[1] && + (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == + node->ip.ip6[2] && (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3]; } static __init void -horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node) +horrible_insert_ordered(struct horrible_allowedips *table, + struct horrible_allowedips_node *node) { struct horrible_allowedips_node *other = NULL, *where = NULL; u8 my_cidr = horrible_mask_to_cidr(node->mask); hlist_for_each_entry(other, &table->head, table) { - if (other->ip_version == node->ip_version && - !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) && - !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) { + if (!memcmp(&other->mask, &node->mask, + sizeof(union 
nf_inet_addr)) && + !memcmp(&other->ip, &node->ip, + sizeof(union nf_inet_addr)) && + other->ip_version == node->ip_version) { other->value = node->value; kfree(node); return; } - } - hlist_for_each_entry(other, &table->head, table) { where = other; if (horrible_mask_to_cidr(other->mask) <= my_cidr) break; @@ -181,7 +201,8 @@ static __init int horrible_allowedips_insert_v4(struct horrible_allowedips *table, struct in_addr *ip, u8 cidr, void *value) { - struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL); + struct horrible_allowedips_node *node = kzalloc(sizeof(*node), + GFP_KERNEL); if (unlikely(!node)) return -ENOMEM; @@ -198,7 +219,8 @@ static __init int horrible_allowedips_insert_v6(struct horrible_allowedips *table, struct in6_addr *ip, u8 cidr, void *value) { - struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL); + struct horrible_allowedips_node *node = kzalloc(sizeof(*node), + GFP_KERNEL); if (unlikely(!node)) return -ENOMEM; @@ -212,43 +234,39 @@ horrible_allowedips_insert_v6(struct horrible_allowedips *table, } static __init void * -horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip) +horrible_allowedips_lookup_v4(struct horrible_allowedips *table, + struct in_addr *ip) { struct horrible_allowedips_node *node; + void *ret = NULL; hlist_for_each_entry(node, &table->head, table) { - if (node->ip_version == 4 && horrible_match_v4(node, ip)) - return node->value; + if (node->ip_version != 4) + continue; + if (horrible_match_v4(node, ip)) { + ret = node->value; + break; + } } - return NULL; + return ret; } static __init void * -horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip) +horrible_allowedips_lookup_v6(struct horrible_allowedips *table, + struct in6_addr *ip) { struct horrible_allowedips_node *node; + void *ret = NULL; hlist_for_each_entry(node, &table->head, table) { - if (node->ip_version == 6 && horrible_match_v6(node, ip)) - return 
node->value; - } - return NULL; -} - - -static __init void -horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value) -{ - struct horrible_allowedips_node *node; - struct hlist_node *h; - - hlist_for_each_entry_safe(node, h, &table->head, table) { - if (node->value != value) + if (node->ip_version != 6) continue; - hlist_del(&node->table); - kfree(node); + if (horrible_match_v6(node, ip)) { + ret = node->value; + break; + } } - + return ret; } static __init bool randomized_test(void) @@ -278,7 +296,6 @@ static __init bool randomized_test(void) goto free; } kref_init(&peers[i]->refcount); - INIT_LIST_HEAD(&peers[i]->allowedips_list); } mutex_lock(&mutex); @@ -316,7 +333,7 @@ static __init bool randomized_test(void) if (wg_allowedips_insert_v4(&t, (struct in_addr *)mutated, cidr, peer, &mutex) < 0) { - pr_err("allowedips random self-test malloc: FAIL\n"); + pr_err("allowedips random malloc: FAIL\n"); goto free_locked; } if (horrible_allowedips_insert_v4(&h, @@ -379,33 +396,23 @@ static __init bool randomized_test(void) print_tree(t.root6, 128); } - for (j = 0;; ++j) { - for (i = 0; i < NUM_QUERIES; ++i) { - prandom_bytes(ip, 4); - if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { - horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip); - pr_err("allowedips random v4 self-test: FAIL\n"); - goto free; - } - prandom_bytes(ip, 16); - if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { - pr_err("allowedips random v6 self-test: FAIL\n"); - goto free; - } + for (i = 0; i < NUM_QUERIES; ++i) { + prandom_bytes(ip, 4); + if (lookup(t.root4, 32, ip) != + horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { + pr_err("allowedips random self-test: FAIL\n"); + goto free; } - if (j >= NUM_PEERS) - break; - mutex_lock(&mutex); - wg_allowedips_remove_by_peer(&t, peers[j], &mutex); - mutex_unlock(&mutex); - horrible_allowedips_remove_by_value(&h, peers[j]); } - if (t.root4 
|| t.root6) { - pr_err("allowedips random self-test removal: FAIL\n"); - goto free; + for (i = 0; i < NUM_QUERIES; ++i) { + prandom_bytes(ip, 16); + if (lookup(t.root6, 128, ip) != + horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { + pr_err("allowedips random self-test: FAIL\n"); + goto free; + } } - ret = true; free: diff --git a/net/wireguard/send.c b/net/wireguard/send.c index 55bb0c9313d7..828b086abe31 100644 --- a/net/wireguard/send.c +++ b/net/wireguard/send.c @@ -242,7 +242,8 @@ void wg_packet_send_keepalive(struct wg_peer *peer) wg_packet_send_staged_packets(peer); } -static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first) +static void wg_packet_create_data_done(struct sk_buff *first, + struct wg_peer *peer) { struct sk_buff *skb, *next; bool is_keepalive, data_sent = false; @@ -264,19 +265,22 @@ static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *fir void wg_packet_tx_worker(struct work_struct *work) { - struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work); + struct crypt_queue *queue = container_of(work, struct crypt_queue, + work); struct noise_keypair *keypair; enum packet_state state; struct sk_buff *first; + struct wg_peer *peer; - while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL && + while ((first = __ptr_ring_peek(&queue->ring)) != NULL && (state = atomic_read_acquire(&PACKET_CB(first)->state)) != PACKET_STATE_UNCRYPTED) { - wg_prev_queue_drop_peeked(&peer->tx_queue); + __ptr_ring_discard_one(&queue->ring); + peer = PACKET_PEER(first); keypair = PACKET_CB(first)->keypair; if (likely(state == PACKET_STATE_CRYPTED)) - wg_packet_create_data_done(peer, first); + wg_packet_create_data_done(first, peer); else kfree_skb_list(first); @@ -308,15 +312,17 @@ void wg_packet_encrypt_worker(struct work_struct *work) break; } } - wg_queue_enqueue_per_peer_tx(first, state); + wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, + state); 
simd_relax(&simd_context); } simd_put(&simd_context); } -static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first) +static void wg_packet_create_data(struct sk_buff *first) { + struct wg_peer *peer = PACKET_PEER(first); struct wg_device *wg = peer->device; int ret = -EINVAL; @@ -324,10 +330,13 @@ static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first) if (unlikely(READ_ONCE(peer->is_dead))) goto err; - ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first, - wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu); + ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, + &peer->tx_queue, first, + wg->packet_crypt_wq, + &wg->encrypt_queue.last_cpu); if (unlikely(ret == -EPIPE)) - wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD); + wg_queue_enqueue_per_peer(&peer->tx_queue, first, + PACKET_STATE_DEAD); err: rcu_read_unlock_bh(); if (likely(!ret || ret == -EPIPE)) @@ -391,7 +400,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer) packets.prev->next = NULL; wg_peer_get(keypair->entry.peer); PACKET_CB(packets.next)->keypair = keypair; - wg_packet_create_data(peer, packets.next); + wg_packet_create_data(packets.next); return; out_invalid: diff --git a/net/wireguard/socket.c b/net/wireguard/socket.c index bd887f33a3a8..c33e2c81635f 100644 --- a/net/wireguard/socket.c +++ b/net/wireguard/socket.c @@ -53,7 +53,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb, if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0, fl.saddr, RT_SCOPE_HOST))) { endpoint->src4.s_addr = 0; - endpoint->src_if4 = 0; + *(__force __be32 *)&endpoint->src_if4 = 0; fl.saddr = 0; if (cache) dst_cache_reset(cache); @@ -63,7 +63,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb, PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) && rt->dst.dev->ifindex != endpoint->src_if4)))) { endpoint->src4.s_addr = 0; - endpoint->src_if4 = 0; + *(__force __be32 *)&endpoint->src_if4 = 0; fl.saddr = 0; if 
(cache) dst_cache_reset(cache); @@ -71,7 +71,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb, ip_rt_put(rt); rt = ip_route_output_flow(sock_net(sock), &fl, sock); } - if (IS_ERR(rt)) { + if (unlikely(IS_ERR(rt))) { ret = PTR_ERR(rt); net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", wg->dev->name, &endpoint->addr, ret); @@ -138,7 +138,7 @@ static int send6(struct wg_device *wg, struct sk_buff *skb, } dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl, NULL); - if (IS_ERR(dst)) { + if (unlikely(IS_ERR(dst))) { ret = PTR_ERR(dst); net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", wg->dev->name, &endpoint->addr, ret); @@ -308,7 +308,7 @@ void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer) { write_lock_bh(&peer->endpoint_lock); memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6)); - dst_cache_reset_now(&peer->endpoint_cache); + dst_cache_reset(&peer->endpoint_cache); write_unlock_bh(&peer->endpoint_lock); } @@ -430,7 +430,7 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4, if (new4) wg->incoming_port = ntohs(inet_sk(new4)->inet_sport); mutex_unlock(&wg->socket_update_lock); - synchronize_net(); + synchronize_rcu(); sock_free(old4); sock_free(old6); } diff --git a/net/wireguard/version.h b/net/wireguard/version.h index ba3d8058de80..eada02358d4e 100644 --- a/net/wireguard/version.h +++ b/net/wireguard/version.h @@ -1,3 +1,3 @@ #ifndef WIREGUARD_VERSION -#define WIREGUARD_VERSION "1.0.20211208" +#define WIREGUARD_VERSION "1.0.20201112" #endif diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 8966cf3fa821..8ebe87ee1b52 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -10453,7 +10453,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct nlattr *tb[NUM_NL80211_REKEY_DATA]; - struct cfg80211_gtk_rekey_data rekey_data = {}; + struct 
cfg80211_gtk_rekey_data rekey_data; int err; if (!info->attrs[NL80211_ATTR_REKEY_DATA]) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index afe55f6ebb6e..422bcb0612dd 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -947,14 +947,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, * be grouped with this beacon for updates ... */ if (!cfg80211_combine_bsses(rdev, new)) { - bss_ref_put(rdev, new); + kfree(new); goto drop; } } if (rdev->bss_entries >= bss_entries_limit && !cfg80211_bss_expire_oldest(rdev)) { - bss_ref_put(rdev, new); + kfree(new); goto drop; } diff --git a/net/wireless/sme.c b/net/wireless/sme.c index ed772d4937a9..25355cb0be5a 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -546,7 +546,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev, if (wdev->current_bss) return -EALREADY; - if (wdev->conn) + if (WARN_ON(wdev->conn)) return -EINPROGRESS; wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); diff --git a/net/wireless/util.c b/net/wireless/util.c index 95eab2690f4f..dd4be0466ae0 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -410,8 +410,8 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr) } EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen); -static int __ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, - enum nl80211_iftype iftype, bool is_amsdu) +int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, + enum nl80211_iftype iftype) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; u16 hdrlen, ethertype; @@ -505,7 +505,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, payload = skb->data + hdrlen; ethertype = (payload[6] << 8) | payload[7]; - if (likely((!is_amsdu && ether_addr_equal(payload, rfc1042_header) && + if (likely((ether_addr_equal(payload, rfc1042_header) && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || ether_addr_equal(payload, bridge_tunnel_header))) { /* remove RFC1042 or 
Bridge-Tunnel encapsulation and @@ -526,12 +526,6 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, } return 0; } - -int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, - enum nl80211_iftype iftype) -{ - return __ieee80211_data_to_8023(skb, addr, iftype, false); -} EXPORT_SYMBOL(ieee80211_data_to_8023); int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr, @@ -691,9 +685,6 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list, /* the last MSDU has no padding */ if (subframe_len > remaining) goto purge; - /* mitigate A-MSDU aggregation injection attacks */ - if (ether_addr_equal(eth->h_dest, rfc1042_header)) - goto purge; skb_pull(skb, sizeof(struct ethhdr)); /* reuse skb for the last subframe */ @@ -959,7 +950,6 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, switch (otype) { case NL80211_IFTYPE_AP: - case NL80211_IFTYPE_P2P_GO: cfg80211_stop_ap(rdev, dev, true); break; case NL80211_IFTYPE_ADHOC: @@ -975,9 +965,6 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, case NL80211_IFTYPE_MESH_POINT: /* mesh should be handled? 
*/ break; - case NL80211_IFTYPE_OCB: - cfg80211_leave_ocb(rdev, dev); - break; default: break; } diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 9a929010ea9d..843d2cf1e6a6 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -895,9 +895,8 @@ out: int call_commit_handler(struct net_device *dev) { #ifdef CONFIG_WIRELESS_EXT - if (netif_running(dev) && - dev->wireless_handlers && - dev->wireless_handlers->standard[0]) + if ((netif_running(dev)) && + (dev->wireless_handlers->standard[0] != NULL)) /* Call the commit handler on the driver */ return dev->wireless_handlers->standard[0](dev, NULL, NULL, NULL); diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c index b379a0371653..33bef22e44e9 100644 --- a/net/wireless/wext-spy.c +++ b/net/wireless/wext-spy.c @@ -120,8 +120,8 @@ int iw_handler_set_thrspy(struct net_device * dev, return -EOPNOTSUPP; /* Just do it */ - spydata->spy_thr_low = threshold->low; - spydata->spy_thr_high = threshold->high; + memcpy(&(spydata->spy_thr_low), &(threshold->low), + 2 * sizeof(struct iw_quality)); /* Clear flag */ memset(spydata->spy_thr_under, '\0', sizeof(spydata->spy_thr_under)); @@ -147,8 +147,8 @@ int iw_handler_get_thrspy(struct net_device * dev, return -EOPNOTSUPP; /* Just do it */ - threshold->low = spydata->spy_thr_low; - threshold->high = spydata->spy_thr_high; + memcpy(&(threshold->low), &(spydata->spy_thr_low), + 2 * sizeof(struct iw_quality)); return 0; } @@ -173,10 +173,10 @@ static void iw_send_thrspy_event(struct net_device * dev, memcpy(threshold.addr.sa_data, address, ETH_ALEN); threshold.addr.sa_family = ARPHRD_ETHER; /* Copy stats */ - threshold.qual = *wstats; + memcpy(&(threshold.qual), wstats, sizeof(struct iw_quality)); /* Copy also thresholds */ - threshold.low = spydata->spy_thr_low; - threshold.high = spydata->spy_thr_high; + memcpy(&(threshold.low), &(spydata->spy_thr_low), + 2 * sizeof(struct iw_quality)); /* Send event to user space */ 
wireless_send_event(dev, SIOCGIWTHRSPY, &wrqu, (char *) &threshold); diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 156639be7ed0..dd9a0cff2287 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -550,7 +550,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol, if (protocol) goto out; - rc = -ENOMEM; + rc = -ENOBUFS; if ((sk = x25_alloc_socket(net, kern)) == NULL) goto out; @@ -679,8 +679,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) int len, i, rc = 0; if (addr_len != sizeof(struct sockaddr_x25) || - addr->sx25_family != AF_X25 || - strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) { + addr->sx25_family != AF_X25) { rc = -EINVAL; goto out; } @@ -774,8 +773,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr, rc = -EINVAL; if (addr_len != sizeof(struct sockaddr_x25) || - addr->sx25_family != AF_X25 || - strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) + addr->sx25_family != AF_X25) goto out; rc = -ENETUNREACH; diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index d613bf77cc0f..1e87639f2c27 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -315,7 +315,7 @@ resume: /* only the first xfrm gets the encap type */ encap_type = 0; - if (x->repl->recheck(x, skb, seq)) { + if (async && x->repl->recheck(x, skb, seq)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); goto drop_unlock; } diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 3fd866867ce9..f93c95541d37 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -566,20 +566,6 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, copy_from_user_state(x, p); - if (attrs[XFRMA_ENCAP]) { - x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), - sizeof(*x->encap), GFP_KERNEL); - if (x->encap == NULL) - goto error; - } - - if (attrs[XFRMA_COADDR]) { - x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]), - sizeof(*x->coaddr), GFP_KERNEL); - if 
(x->coaddr == NULL) - goto error; - } - if (attrs[XFRMA_SA_EXTRA_FLAGS]) x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]); @@ -600,9 +586,23 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, attrs[XFRMA_ALG_COMP]))) goto error; + if (attrs[XFRMA_ENCAP]) { + x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), + sizeof(*x->encap), GFP_KERNEL); + if (x->encap == NULL) + goto error; + } + if (attrs[XFRMA_TFCPAD]) x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]); + if (attrs[XFRMA_COADDR]) { + x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]), + sizeof(*x->coaddr), GFP_KERNEL); + if (x->coaddr == NULL) + goto error; + } + xfrm_mark_get(attrs, &x->mark); if (attrs[XFRMA_OUTPUT_MARK]) diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c index a7f5ee8b6edc..2fca916d9edf 100644 --- a/samples/kfifo/bytestream-example.c +++ b/samples/kfifo/bytestream-example.c @@ -124,10 +124,8 @@ static ssize_t fifo_write(struct file *file, const char __user *buf, ret = kfifo_from_user(&test, buf, count, &copied); mutex_unlock(&write_lock); - if (ret) - return ret; - return copied; + return ret ? ret : copied; } static ssize_t fifo_read(struct file *file, char __user *buf, @@ -142,10 +140,8 @@ static ssize_t fifo_read(struct file *file, char __user *buf, ret = kfifo_to_user(&test, buf, count, &copied); mutex_unlock(&read_lock); - if (ret) - return ret; - return copied; + return ret ? ret : copied; } static const struct file_operations fifo_fops = { diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c index a326a37e9163..8dc3c2e7105a 100644 --- a/samples/kfifo/inttype-example.c +++ b/samples/kfifo/inttype-example.c @@ -117,10 +117,8 @@ static ssize_t fifo_write(struct file *file, const char __user *buf, ret = kfifo_from_user(&test, buf, count, &copied); mutex_unlock(&write_lock); - if (ret) - return ret; - return copied; + return ret ? 
ret : copied; } static ssize_t fifo_read(struct file *file, char __user *buf, @@ -135,10 +133,8 @@ static ssize_t fifo_read(struct file *file, char __user *buf, ret = kfifo_to_user(&test, buf, count, &copied); mutex_unlock(&read_lock); - if (ret) - return ret; - return copied; + return ret ? ret : copied; } static const struct file_operations fifo_fops = { diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c index deb87a2e4e6b..2d7529eeb294 100644 --- a/samples/kfifo/record-example.c +++ b/samples/kfifo/record-example.c @@ -131,10 +131,8 @@ static ssize_t fifo_write(struct file *file, const char __user *buf, ret = kfifo_from_user(&test, buf, count, &copied); mutex_unlock(&write_lock); - if (ret) - return ret; - return copied; + return ret ? ret : copied; } static ssize_t fifo_read(struct file *file, char __user *buf, @@ -149,10 +147,8 @@ static ssize_t fifo_read(struct file *file, char __user *buf, ret = kfifo_to_user(&test, buf, count, &copied); mutex_unlock(&read_lock); - if (ret) - return ret; - return copied; + return ret ? 
ret : copied; } static const struct file_operations fifo_fops = { diff --git a/scripts/Makefile b/scripts/Makefile index 151cedeeef4c..fd0d53d4a234 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -11,9 +11,6 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include -CRYPTO_LIBS = $(shell pkg-config --libs libcrypto 2> /dev/null || echo -lcrypto) -CRYPTO_CFLAGS = $(shell pkg-config --cflags libcrypto 2> /dev/null) - hostprogs-$(CONFIG_KALLSYMS) += kallsyms hostprogs-$(CONFIG_LOGO) += pnmtologo hostprogs-$(CONFIG_VT) += conmakehash @@ -25,10 +22,8 @@ hostprogs-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += extract-cert HOSTCFLAGS_sortextable.o = -I$(srctree)/tools/include HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include -HOSTCFLAGS_sign-file.o = $(CRYPTO_CFLAGS) -HOSTLOADLIBES_sign-file = $(CRYPTO_LIBS) -HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS) -HOSTLOADLIBES_extract-cert = $(CRYPTO_LIBS) +HOSTLOADLIBES_sign-file = -lcrypto +HOSTLOADLIBES_extract-cert = -lcrypto always := $(hostprogs-y) $(hostprogs-m) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 726d1b3f6759..e6441514be23 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -226,8 +226,6 @@ cmd_modversions_c = \ endif ifdef CONFIG_FTRACE_MCOUNT_RECORD -ifndef CC_USING_RECORD_MCOUNT -# compiler will not generate __mcount_loc use recordmcount or recordmcount.pl ifdef BUILD_C_RECORDMCOUNT ifeq ("$(origin RECORDMCOUNT_WARN)", "command line") RECORDMCOUNT_FLAGS = -w @@ -256,7 +254,6 @@ cmd_record_mcount = \ "$(CC_FLAGS_FTRACE)" ]; then \ $(sub_cmd_record_mcount) \ fi; -endif # CC_USING_RECORD_MCOUNT endif define rule_cc_o_c diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index b1b545ef8091..11096b2fa5cb 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -68,6 +68,5 @@ KBUILD_CFLAGS += $(call cc-disable-warning, sign-compare) KBUILD_CFLAGS += $(call cc-disable-warning, format-zero-length) KBUILD_CFLAGS += $(call cc-disable-warning, uninitialized) 
KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast) -KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access) endif endif diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 785761eedee0..2951d2622489 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3738,7 +3738,7 @@ sub process { $fix) { fix_delete_line($fixlinenr, $rawline); my $fixed_line = $rawline; - $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*)\{(.*)$/; + $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*){(.*)$/; my $line1 = $1; my $line2 = $2; fix_insert_line($fixlinenr, ltrim($line1)); diff --git a/scripts/depmod.sh b/scripts/depmod.sh index b0cb89e73bc5..baedaef53ca0 100755 --- a/scripts/depmod.sh +++ b/scripts/depmod.sh @@ -14,8 +14,6 @@ if ! test -r System.map ; then exit 0 fi -# legacy behavior: "depmod" in /sbin, no /sbin in PATH -PATH="$PATH:/sbin" if [ -z $(command -v $DEPMOD) ]; then echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2 echo "This is probably in the kmod package." >&2 diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c index c58a46904861..f7049e288e93 100644 --- a/scripts/kconfig/nconf.c +++ b/scripts/kconfig/nconf.c @@ -502,8 +502,8 @@ static int get_mext_match(const char *match_str, match_f flag) else if (flag == FIND_NEXT_MATCH_UP) --match_start; - match_start = (match_start + items_num) % items_num; index = match_start; + index = (index + items_num) % items_num; while (true) { char *str = k_menu_items[index].str; if (strcasestr(str, match_str) != 0) diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h index 8fd667ecfba9..5c723833ec54 100755 --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h @@ -86,23 +86,15 @@ UTS_TRUNCATE="cut -b -$UTS_LEN" # Only replace the real compile.h if the new one is different, # in order to preserve the timestamp and avoid unnecessary # recompilations. 
-# We don't consider the file changed if only the date/time changed, -# unless KBUILD_BUILD_TIMESTAMP was explicitly set (e.g. for -# reproducible builds with that value referring to a commit timestamp). +# We don't consider the file changed if only the date/time changed. # A kernel config change will increase the generation number, thus # causing compile.h to be updated (including date/time) due to the # changed comment in the # first line. -if [ -z "$KBUILD_BUILD_TIMESTAMP" ]; then - IGNORE_PATTERN="UTS_VERSION" -else - IGNORE_PATTERN="NOT_A_PATTERN_TO_BE_MATCHED" -fi - if [ -r $TARGET ] && \ - grep -v $IGNORE_PATTERN $TARGET > .tmpver.1 && \ - grep -v $IGNORE_PATTERN .tmpcompile > .tmpver.2 && \ + grep -v 'UTS_VERSION' $TARGET > .tmpver.1 && \ + grep -v 'UTS_VERSION' .tmpcompile > .tmpver.2 && \ cmp -s .tmpver.1 .tmpver.2; then rm -f .tmpcompile else diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 8cba4c44da4c..7250fb38350c 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -362,7 +362,7 @@ static uint32_t (*w2)(uint16_t); static int is_mcounted_section_name(char const *const txtname) { - return strncmp(".text", txtname, 5) == 0 || + return strcmp(".text", txtname) == 0 || strcmp(".ref.text", txtname) == 0 || strcmp(".sched.text", txtname) == 0 || strcmp(".spinlock.text", txtname) == 0 || diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index e1de4423abce..96e2486a6fc4 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -138,11 +138,6 @@ my %text_sections = ( ".text.unlikely" => 1, ); -# Acceptable section-prefixes to record. -my %text_section_prefixes = ( - ".text." => 1, -); - # Note: we are nice to C-programmers here, thus we skip the '||='-idiom. 
$objdump = 'objdump' if (!$objdump); $objcopy = 'objcopy' if (!$objcopy); @@ -248,7 +243,7 @@ if ($arch eq "x86_64") { } elsif ($arch eq "s390" && $bits == 64) { if ($cc =~ /-DCC_USING_HOTPATCH/) { - $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(brcl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$"; + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*brcl\\s*0,[0-9a-f]+ <([^\+]*)>\$"; $mcount_adjust = 0; } else { $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$"; @@ -264,11 +259,7 @@ if ($arch eq "x86_64") { # force flags for this arch $ld .= " -m shlelf_linux"; - if ($endian eq "big") { - $objcopy .= " -O elf32-shbig-linux"; - } else { - $objcopy .= " -O elf32-sh-linux"; - } + $objcopy .= " -O elf32-sh-linux"; } elsif ($arch eq "powerpc") { $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)"; @@ -508,14 +499,6 @@ while () { # Only record text sections that we know are safe $read_function = defined($text_sections{$1}); - if (!$read_function) { - foreach my $prefix (keys %text_section_prefixes) { - if (substr($1, 0, length $prefix) eq $prefix) { - $read_function = 1; - last; - } - } - } # print out any recorded offsets update_funcs(); diff --git a/scripts/tracing/draw_functrace.py b/scripts/tracing/draw_functrace.py index 8754b8fbe943..db40fa04cd51 100755 --- a/scripts/tracing/draw_functrace.py +++ b/scripts/tracing/draw_functrace.py @@ -17,7 +17,7 @@ Usage: $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func Wait some times but not too much, the script is a bit slow. 
Break the pipe (Ctrl + Z) - $ scripts/tracing/draw_functrace.py < ~/raw_trace_func > draw_functrace + $ scripts/draw_functrace.py < raw_trace_func > draw_functrace Then you have your drawn trace in draw_functrace """ @@ -103,10 +103,10 @@ def parseLine(line): line = line.strip() if line.startswith("#"): raise CommentLineException - m = re.match("[^]]+?\\] +([a-z.]+) +([0-9.]+): (\\w+) <-(\\w+)", line) + m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line) if m is None: raise BrokenLineException - return (m.group(2), m.group(3), m.group(4)) + return (m.group(1), m.group(2), m.group(3)) def main(): diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c index 8af394719e6c..30aced99bc55 100644 --- a/security/integrity/ima/ima_fs.c +++ b/security/integrity/ima/ima_fs.c @@ -391,11 +391,11 @@ int __init ima_fs_init(void) return 0; out: - securityfs_remove(ima_policy); securityfs_remove(violations); securityfs_remove(runtime_measurements_count); securityfs_remove(ascii_runtime_measurements); securityfs_remove(binary_runtime_measurements); securityfs_remove(ima_dir); + securityfs_remove(ima_policy); return -1; } diff --git a/security/integrity/integrity_audit.c b/security/integrity/integrity_audit.c index 6c415667ba67..90987d15b6fe 100644 --- a/security/integrity/integrity_audit.c +++ b/security/integrity/integrity_audit.c @@ -39,8 +39,6 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode, return; ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno); - if (!ab) - return; audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u", task_pid_nr(current), from_kuid(&init_user_ns, current_cred()->uid), diff --git a/security/keys/trusted.c b/security/keys/trusted.c index 4b350fec3153..214ae2dc7f64 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c @@ -778,7 +778,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay, case Opt_migratable: if (*args[0].from == '0') pay->migratable = 0; - else if 
(*args[0].from != '1') + else return -EINVAL; break; case Opt_pcrlock: diff --git a/security/lsm_audit.c b/security/lsm_audit.c index 24ea1eeee9cd..d0b74c12d56d 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c @@ -264,9 +264,7 @@ static void dump_common_audit_data(struct audit_buffer *ab, struct inode *inode; audit_log_format(ab, " name="); - spin_lock(&a->u.dentry->d_lock); audit_log_untrustedstring(ab, a->u.dentry->d_name.name); - spin_unlock(&a->u.dentry->d_lock); inode = d_backing_inode(a->u.dentry); if (inode) { @@ -284,9 +282,8 @@ static void dump_common_audit_data(struct audit_buffer *ab, dentry = d_find_alias(inode); if (dentry) { audit_log_format(ab, " name="); - spin_lock(&dentry->d_lock); - audit_log_untrustedstring(ab, dentry->d_name.name); - spin_unlock(&dentry->d_lock); + audit_log_untrustedstring(ab, + dentry->d_name.name); dput(dentry); } audit_log_format(ab, " dev="); diff --git a/security/security.c b/security/security.c index 6cced62a81b6..e60d560e45f8 100644 --- a/security/security.c +++ b/security/security.c @@ -130,25 +130,25 @@ int __init security_module_enable(const char *module) /* Security operations */ -int security_binder_set_context_mgr(const struct cred *mgr) +int security_binder_set_context_mgr(struct task_struct *mgr) { return call_int_hook(binder_set_context_mgr, 0, mgr); } -int security_binder_transaction(const struct cred *from, - const struct cred *to) +int security_binder_transaction(struct task_struct *from, + struct task_struct *to) { return call_int_hook(binder_transaction, 0, from, to); } -int security_binder_transfer_binder(const struct cred *from, - const struct cred *to) +int security_binder_transfer_binder(struct task_struct *from, + struct task_struct *to) { return call_int_hook(binder_transfer_binder, 0, from, to); } -int security_binder_transfer_file(const struct cred *from, - const struct cred *to, struct file *file) +int security_binder_transfer_file(struct task_struct *from, + struct task_struct *to, 
struct file *file) { return call_int_hook(binder_transfer_file, 0, from, to, file); } diff --git a/security/selinux/avc.c b/security/selinux/avc.c index b3d995e52d34..f5a2c6285dea 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c @@ -357,27 +357,26 @@ static struct avc_xperms_decision_node struct avc_xperms_decision_node *xpd_node; struct extended_perms_decision *xpd; - xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, - GFP_NOWAIT | __GFP_NOWARN); + xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT); if (!xpd_node) return NULL; xpd = &xpd_node->xpd; if (which & XPERMS_ALLOWED) { xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep, - GFP_NOWAIT | __GFP_NOWARN); + GFP_NOWAIT); if (!xpd->allowed) goto error; } if (which & XPERMS_AUDITALLOW) { xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep, - GFP_NOWAIT | __GFP_NOWARN); + GFP_NOWAIT); if (!xpd->auditallow) goto error; } if (which & XPERMS_DONTAUDIT) { xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep, - GFP_NOWAIT | __GFP_NOWARN); + GFP_NOWAIT); if (!xpd->dontaudit) goto error; } @@ -405,7 +404,7 @@ static struct avc_xperms_node *avc_xperms_alloc(void) { struct avc_xperms_node *xp_node; - xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT | __GFP_NOWARN); + xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT); if (!xp_node) return xp_node; INIT_LIST_HEAD(&xp_node->xpd_head); @@ -558,7 +557,7 @@ static struct avc_node *avc_alloc_node(void) { struct avc_node *node; - node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT | __GFP_NOWARN); + node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT); if (!node) goto out; diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index df8e18de8512..534019901c0b 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -1977,18 +1977,21 @@ static inline u32 open_file_to_av(struct file *file) /* Hook functions begin here. 
*/ -static int selinux_binder_set_context_mgr(const struct cred *mgr) +static int selinux_binder_set_context_mgr(struct task_struct *mgr) { - return avc_has_perm(current_sid(), cred_sid(mgr), SECCLASS_BINDER, + u32 mysid = current_sid(); + u32 mgrsid = task_sid(mgr); + + return avc_has_perm(mysid, mgrsid, SECCLASS_BINDER, BINDER__SET_CONTEXT_MGR, NULL); } -static int selinux_binder_transaction(const struct cred *from, - const struct cred *to) +static int selinux_binder_transaction(struct task_struct *from, + struct task_struct *to) { u32 mysid = current_sid(); - u32 fromsid = cred_sid(from); - u32 tosid = cred_sid(to); + u32 fromsid = task_sid(from); + u32 tosid = task_sid(to); int rc; if (mysid != fromsid) { @@ -2002,19 +2005,21 @@ static int selinux_binder_transaction(const struct cred *from, NULL); } -static int selinux_binder_transfer_binder(const struct cred *from, - const struct cred *to) +static int selinux_binder_transfer_binder(struct task_struct *from, + struct task_struct *to) { - return avc_has_perm(cred_sid(from), cred_sid(to), - SECCLASS_BINDER, BINDER__TRANSFER, + u32 fromsid = task_sid(from); + u32 tosid = task_sid(to); + + return avc_has_perm(fromsid, tosid, SECCLASS_BINDER, BINDER__TRANSFER, NULL); } -static int selinux_binder_transfer_file(const struct cred *from, - const struct cred *to, +static int selinux_binder_transfer_file(struct task_struct *from, + struct task_struct *to, struct file *file) { - u32 sid = cred_sid(to); + u32 sid = task_sid(to); struct file_security_struct *fsec = file->f_security; struct inode *inode = d_backing_inode(file->f_path.dentry); struct inode_security_struct *isec = inode->i_security; @@ -5009,7 +5014,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb, struct common_audit_data ad; struct lsm_network_audit net = {0,}; char *addrp; - u8 proto = 0; + u8 proto; if (sk == NULL) return NF_ACCEPT; diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 
8a764f40730b..d40631150045 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -100,7 +100,7 @@ struct security_class_mapping secclass_map[] = { { COMMON_IPC_PERMS, NULL } }, { "netlink_route_socket", { COMMON_SOCK_PERMS, - "nlmsg_read", "nlmsg_write", "nlmsg_readpriv", NULL } }, + "nlmsg_read", "nlmsg_write", NULL } }, { "netlink_tcpdiag_socket", { COMMON_SOCK_PERMS, "nlmsg_read", "nlmsg_write", NULL } }, diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index b45a3a72c161..0464cbb709cd 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h @@ -78,7 +78,6 @@ enum { }; #define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1) -extern int selinux_android_netlink_route; extern int selinux_policycap_netpeer; extern int selinux_policycap_openperm; extern int selinux_policycap_alwaysnetwork; @@ -264,7 +263,6 @@ extern struct vfsmount *selinuxfs_mount; extern void selnl_notify_setenforce(int val); extern void selnl_notify_policyload(u32 seqno); extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm); -extern void selinux_nlmsg_init(void); #endif /* _SELINUX_SECURITY_H_ */ diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 78a8c420b1f5..0714b4c61a8b 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -191,27 +191,3 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm) return err; } - -static void nlmsg_set_getlink_perm(u32 perm) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(nlmsg_route_perms); i++) { - if (nlmsg_route_perms[i].nlmsg_type == RTM_GETLINK) { - nlmsg_route_perms[i].perm = perm; - break; - } - } -} - -/** - * Use nlmsg_readpriv as the permission for RTM_GETLINK messages if the - * netlink_route_getlink policy capability is set. Otherwise use nlmsg_read. 
- */ -void selinux_nlmsg_init(void) -{ - if (selinux_android_netlink_route) - nlmsg_set_getlink_perm(NETLINK_ROUTE_SOCKET__NLMSG_READPRIV); - else - nlmsg_set_getlink_perm(NETLINK_ROUTE_SOCKET__NLMSG_READ); -} diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 5ee23e3a3678..01fbbbf89f41 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -2329,10 +2329,6 @@ int policydb_read(struct policydb *p, void *fp) p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN); p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN); - if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_ANDROID_NETLINK_ROUTE)) { - p->android_netlink_route = 1; - } - if (p->policyvers >= POLICYDB_VERSION_POLCAP) { rc = ebitmap_read(&p->policycaps, fp); if (rc) diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h index 0d511cf3c1e9..725d5945a97e 100644 --- a/security/selinux/ss/policydb.h +++ b/security/selinux/ss/policydb.h @@ -227,7 +227,6 @@ struct genfs { /* The policy database */ struct policydb { int mls_enabled; - int android_netlink_route; /* symbol tables */ struct symtab symtab[SYM_NUM]; @@ -314,7 +313,6 @@ extern int policydb_write(struct policydb *p, void *fp); #define PERM_SYMTAB_SIZE 32 #define POLICYDB_CONFIG_MLS 1 -#define POLICYDB_CONFIG_ANDROID_NETLINK_ROUTE (1 << 31) /* the config flags related to unknown classes/perms are bits 2 and 3 */ #define REJECT_UNKNOWN 0x00000002 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 43563e613946..55c869e0a3a0 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -70,7 +70,6 @@ #include "ebitmap.h" #include "audit.h" -int selinux_android_netlink_route; int selinux_policycap_netpeer; int selinux_policycap_openperm; int selinux_policycap_alwaysnetwork; @@ -1998,8 +1997,6 @@ static void security_load_policycaps(void) POLICYDB_CAPABILITY_OPENPERM); selinux_policycap_alwaysnetwork = 
ebitmap_get_bit(&policydb.policycaps, POLICYDB_CAPABILITY_ALWAYSNETWORK); - selinux_android_netlink_route = policydb.android_netlink_route; - selinux_nlmsg_init(); } static int security_preserve_bools(struct policydb *p); diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c index 84f38b694242..0df316c62005 100644 --- a/security/smack/smack_access.c +++ b/security/smack/smack_access.c @@ -90,22 +90,23 @@ int log_policy = SMACK_AUDIT_DENIED; int smk_access_entry(char *subject_label, char *object_label, struct list_head *rule_list) { + int may = -ENOENT; struct smack_rule *srp; list_for_each_entry_rcu(srp, rule_list, list) { if (srp->smk_object->smk_known == object_label && srp->smk_subject->smk_known == subject_label) { - int may = srp->smk_access; - /* - * MAY_WRITE implies MAY_LOCK. - */ - if ((may & MAY_WRITE) == MAY_WRITE) - may |= MAY_LOCK; - return may; + may = srp->smk_access; + break; } } - return -ENOENT; + /* + * MAY_WRITE implies MAY_LOCK. + */ + if ((may & MAY_WRITE) == MAY_WRITE) + may |= MAY_LOCK; + return may; } /** diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index ce30b61c5617..df082648eb0a 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -721,7 +721,9 @@ static void smk_cipso_doi(void) printk(KERN_WARNING "%s:%d remove rc = %d\n", __func__, __LINE__, rc); - doip = kmalloc(sizeof(struct cipso_v4_doi), GFP_KERNEL | __GFP_NOFAIL); + doip = kmalloc(sizeof(struct cipso_v4_doi), GFP_KERNEL); + if (doip == NULL) + panic("smack: Failed to initialize cipso DOI.\n"); doip->map.std = NULL; doip->doi = smk_cipso_doi_value; doip->type = CIPSO_V4_MAP_PASS; @@ -740,7 +742,7 @@ static void smk_cipso_doi(void) if (rc != 0) { printk(KERN_WARNING "%s:%d map add rc = %d\n", __func__, __LINE__, rc); - netlbl_cfg_cipsov4_del(doip->doi, &nai); + kfree(doip); return; } } @@ -857,7 +859,6 @@ static int smk_open_cipso(struct inode *inode, struct file *file) static ssize_t smk_set_cipso(struct file *file, const 
char __user *buf, size_t count, loff_t *ppos, int format) { - struct netlbl_lsm_catmap *old_cat; struct smack_known *skp; struct netlbl_lsm_secattr ncats; char mapcatset[SMK_CIPSOLEN]; @@ -951,11 +952,9 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN); if (rc >= 0) { - old_cat = skp->smk_netlabel.attr.mls.cat; + netlbl_catmap_free(skp->smk_netlabel.attr.mls.cat); skp->smk_netlabel.attr.mls.cat = ncats.attr.mls.cat; skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl; - synchronize_rcu(); - netlbl_catmap_free(old_cat); rc = count; } diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c index 3fd7f67e701e..ac0a40b9ba1e 100644 --- a/sound/core/control_compat.c +++ b/sound/core/control_compat.c @@ -281,7 +281,6 @@ static int copy_ctl_value_to_user(void __user *userdata, struct snd_ctl_elem_value *data, int type, int count) { - struct snd_ctl_elem_value32 __user *data32 = userdata; int i, size; if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN || @@ -298,8 +297,6 @@ static int copy_ctl_value_to_user(void __user *userdata, if (copy_to_user(valuep, data->value.bytes.data, size)) return -EFAULT; } - if (copy_to_user(&data32->id, &data->id, sizeof(data32->id))) - return -EFAULT; return 0; } diff --git a/sound/core/init.c b/sound/core/init.c index f2b2102c0a72..969ab45bc85e 100644 --- a/sound/core/init.c +++ b/sound/core/init.c @@ -447,8 +447,10 @@ int snd_card_disconnect(struct snd_card *card) return 0; } card->shutdown = 1; + spin_unlock(&card->files_lock); /* replace file->f_op with special dummy operations */ + spin_lock(&card->files_lock); list_for_each_entry(mfile, &card->files_list, list) { /* it's critical part, use endless loop */ /* we have no room to fail */ diff --git a/sound/core/jack.c b/sound/core/jack.c index 5c31285ece16..45e9781b592e 100644 --- a/sound/core/jack.c +++ b/sound/core/jack.c @@ -68,13 +68,10 @@ static int snd_jack_dev_free(struct snd_device *device) 
struct snd_card *card = device->card; struct snd_jack_kctl *jack_kctl, *tmp_jack_kctl; - down_write(&card->controls_rwsem); list_for_each_entry_safe(jack_kctl, tmp_jack_kctl, &jack->kctl_list, list) { list_del_init(&jack_kctl->list); snd_ctl_remove(card, jack_kctl->kctl); } - up_write(&card->controls_rwsem); - if (jack->private_free) jack->private_free(jack); @@ -234,10 +231,6 @@ int snd_jack_new(struct snd_card *card, const char *id, int type, return -ENOMEM; jack->id = kstrdup(id, GFP_KERNEL); - if (jack->id == NULL) { - kfree(jack); - return -ENOMEM; - } /* don't creat input device for phantom jack */ if (!phantom_jack) { diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c index 903fbf03559e..7a8c79dd9734 100644 --- a/sound/core/oss/mixer_oss.c +++ b/sound/core/oss/mixer_oss.c @@ -144,13 +144,11 @@ static int snd_mixer_oss_devmask(struct snd_mixer_oss_file *fmixer) if (mixer == NULL) return -EIO; - mutex_lock(&mixer->reg_mutex); for (chn = 0; chn < 31; chn++) { pslot = &mixer->slots[chn]; if (pslot->put_volume || pslot->put_recsrc) result |= 1 << chn; } - mutex_unlock(&mixer->reg_mutex); return result; } @@ -162,13 +160,11 @@ static int snd_mixer_oss_stereodevs(struct snd_mixer_oss_file *fmixer) if (mixer == NULL) return -EIO; - mutex_lock(&mixer->reg_mutex); for (chn = 0; chn < 31; chn++) { pslot = &mixer->slots[chn]; if (pslot->put_volume && pslot->stereo) result |= 1 << chn; } - mutex_unlock(&mixer->reg_mutex); return result; } @@ -179,7 +175,6 @@ static int snd_mixer_oss_recmask(struct snd_mixer_oss_file *fmixer) if (mixer == NULL) return -EIO; - mutex_lock(&mixer->reg_mutex); if (mixer->put_recsrc && mixer->get_recsrc) { /* exclusive */ result = mixer->mask_recsrc; } else { @@ -191,7 +186,6 @@ static int snd_mixer_oss_recmask(struct snd_mixer_oss_file *fmixer) result |= 1 << chn; } } - mutex_unlock(&mixer->reg_mutex); return result; } @@ -202,12 +196,11 @@ static int snd_mixer_oss_get_recsrc(struct snd_mixer_oss_file *fmixer) if (mixer == 
NULL) return -EIO; - mutex_lock(&mixer->reg_mutex); if (mixer->put_recsrc && mixer->get_recsrc) { /* exclusive */ + int err; unsigned int index; - result = mixer->get_recsrc(fmixer, &index); - if (result < 0) - goto unlock; + if ((err = mixer->get_recsrc(fmixer, &index)) < 0) + return err; result = 1 << index; } else { struct snd_mixer_oss_slot *pslot; @@ -222,10 +215,7 @@ static int snd_mixer_oss_get_recsrc(struct snd_mixer_oss_file *fmixer) } } } - mixer->oss_recsrc = result; - unlock: - mutex_unlock(&mixer->reg_mutex); - return result; + return mixer->oss_recsrc = result; } static int snd_mixer_oss_set_recsrc(struct snd_mixer_oss_file *fmixer, int recsrc) @@ -238,7 +228,6 @@ static int snd_mixer_oss_set_recsrc(struct snd_mixer_oss_file *fmixer, int recsr if (mixer == NULL) return -EIO; - mutex_lock(&mixer->reg_mutex); if (mixer->get_recsrc && mixer->put_recsrc) { /* exclusive input */ if (recsrc & ~mixer->oss_recsrc) recsrc &= ~mixer->oss_recsrc; @@ -264,7 +253,6 @@ static int snd_mixer_oss_set_recsrc(struct snd_mixer_oss_file *fmixer, int recsr } } } - mutex_unlock(&mixer->reg_mutex); return result; } @@ -276,7 +264,6 @@ static int snd_mixer_oss_get_volume(struct snd_mixer_oss_file *fmixer, int slot) if (mixer == NULL || slot > 30) return -EIO; - mutex_lock(&mixer->reg_mutex); pslot = &mixer->slots[slot]; left = pslot->volume[0]; right = pslot->volume[1]; @@ -284,21 +271,15 @@ static int snd_mixer_oss_get_volume(struct snd_mixer_oss_file *fmixer, int slot) result = pslot->get_volume(fmixer, pslot, &left, &right); if (!pslot->stereo) right = left; - if (snd_BUG_ON(left < 0 || left > 100)) { - result = -EIO; - goto unlock; - } - if (snd_BUG_ON(right < 0 || right > 100)) { - result = -EIO; - goto unlock; - } + if (snd_BUG_ON(left < 0 || left > 100)) + return -EIO; + if (snd_BUG_ON(right < 0 || right > 100)) + return -EIO; if (result >= 0) { pslot->volume[0] = left; pslot->volume[1] = right; result = (left & 0xff) | ((right & 0xff) << 8); } - unlock: - 
mutex_unlock(&mixer->reg_mutex); return result; } @@ -311,7 +292,6 @@ static int snd_mixer_oss_set_volume(struct snd_mixer_oss_file *fmixer, if (mixer == NULL || slot > 30) return -EIO; - mutex_lock(&mixer->reg_mutex); pslot = &mixer->slots[slot]; if (left > 100) left = 100; @@ -322,13 +302,10 @@ static int snd_mixer_oss_set_volume(struct snd_mixer_oss_file *fmixer, if (pslot->put_volume) result = pslot->put_volume(fmixer, pslot, left, right); if (result < 0) - goto unlock; + return result; pslot->volume[0] = left; pslot->volume[1] = right; - result = (left & 0xff) | ((right & 0xff) << 8); - unlock: - mutex_unlock(&mixer->reg_mutex); - return result; + return (left & 0xff) | ((right & 0xff) << 8); } static int snd_mixer_oss_ioctl1(struct snd_mixer_oss_file *fmixer, unsigned int cmd, unsigned long arg) diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 6af4afe23e37..443bb8ce8255 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -172,7 +172,7 @@ snd_pcm_hw_param_value_min(const struct snd_pcm_hw_params *params, * * Return the maximum value for field PAR. 
*/ -static int +static unsigned int snd_pcm_hw_param_value_max(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir) { @@ -707,25 +707,17 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *oss_params, struct snd_pcm_hw_params *slave_params) { - ssize_t s; - ssize_t oss_buffer_size; - ssize_t oss_period_size, oss_periods; - ssize_t min_period_size, max_period_size; + size_t s; + size_t oss_buffer_size, oss_period_size, oss_periods; + size_t min_period_size, max_period_size; struct snd_pcm_runtime *runtime = substream->runtime; size_t oss_frame_size; oss_frame_size = snd_pcm_format_physical_width(params_format(oss_params)) * params_channels(oss_params) / 8; - oss_buffer_size = snd_pcm_hw_param_value_max(slave_params, - SNDRV_PCM_HW_PARAM_BUFFER_SIZE, - NULL); - if (oss_buffer_size <= 0) - return -EINVAL; oss_buffer_size = snd_pcm_plug_client_size(substream, - oss_buffer_size * oss_frame_size); - if (oss_buffer_size <= 0) - return -EINVAL; + snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size; oss_buffer_size = rounddown_pow_of_two(oss_buffer_size); if (atomic_read(&substream->mmap_count)) { if (oss_buffer_size > runtime->oss.mmap_bytes) @@ -761,21 +753,17 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, min_period_size = snd_pcm_plug_client_size(substream, snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL)); - if (min_period_size > 0) { - min_period_size *= oss_frame_size; - min_period_size = roundup_pow_of_two(min_period_size); - if (oss_period_size < min_period_size) - oss_period_size = min_period_size; - } + min_period_size *= oss_frame_size; + min_period_size = roundup_pow_of_two(min_period_size); + if (oss_period_size < min_period_size) + oss_period_size = min_period_size; max_period_size = snd_pcm_plug_client_size(substream, snd_pcm_hw_param_value_max(slave_params, 
SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL)); - if (max_period_size > 0) { - max_period_size *= oss_frame_size; - max_period_size = rounddown_pow_of_two(max_period_size); - if (oss_period_size > max_period_size) - oss_period_size = max_period_size; - } + max_period_size *= oss_frame_size; + max_period_size = rounddown_pow_of_two(max_period_size); + if (oss_period_size > max_period_size) + oss_period_size = max_period_size; oss_periods = oss_buffer_size / oss_period_size; @@ -783,7 +771,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, oss_periods = substream->oss.setup.periods; s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL); - if (s > 0 && runtime->oss.maxfrags && s > runtime->oss.maxfrags) + if (runtime->oss.maxfrags && s > runtime->oss.maxfrags) s = runtime->oss.maxfrags; if (oss_periods > s) oss_periods = s; @@ -909,15 +897,8 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) err = -EINVAL; goto failure; } - - err = choose_rate(substream, sparams, runtime->oss.rate); - if (err < 0) - goto failure; - err = snd_pcm_hw_param_near(substream, sparams, - SNDRV_PCM_HW_PARAM_CHANNELS, - runtime->oss.channels, NULL); - if (err < 0) - goto failure; + choose_rate(substream, sparams, runtime->oss.rate); + snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_CHANNELS, runtime->oss.channels, NULL); format = snd_pcm_oss_format_from(runtime->oss.format); @@ -2019,15 +2000,11 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsigned int val) { struct snd_pcm_runtime *runtime; - int fragshift; runtime = substream->runtime; if (runtime->oss.subdivision || runtime->oss.fragshift) return -EINVAL; - fragshift = val & 0xffff; - if (fragshift >= 25) /* should be large enough */ - return -EINVAL; - runtime->oss.fragshift = fragshift; + runtime->oss.fragshift = val & 0xffff; 
runtime->oss.maxfrags = (val >> 16) & 0xffff; if (runtime->oss.fragshift < 4) /* < 16 */ runtime->oss.fragshift = 4; @@ -2121,7 +2098,7 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr int err, cmd; #ifdef OSS_DEBUG - pr_debug("pcm_oss: trigger = 0x%x\n", trigger); + pcm_dbg(substream->pcm, "pcm_oss: trigger = 0x%x\n", trigger); #endif psubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; diff --git a/sound/core/pcm.c b/sound/core/pcm.c index 0268572c8b60..8239ebc9349c 100644 --- a/sound/core/pcm.c +++ b/sound/core/pcm.c @@ -858,11 +858,7 @@ EXPORT_SYMBOL(snd_pcm_new_internal); static void free_chmap(struct snd_pcm_str *pstr) { if (pstr->chmap_kctl) { - struct snd_card *card = pstr->pcm->card; - - down_write(&card->controls_rwsem); - snd_ctl_remove(card, pstr->chmap_kctl); - up_write(&card->controls_rwsem); + snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl); pstr->chmap_kctl = NULL; } if (pstr->vol_kctl) { diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index dfb0e496a5b0..ef5743af33de 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -1845,7 +1845,7 @@ static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream, channels = params_channels(params); frame_size = snd_pcm_format_size(format, channels); if (frame_size > 0) - params->fifo_size /= frame_size; + params->fifo_size /= (unsigned)frame_size; } return 0; } diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c index 48b3398e9e9d..df5b984bb33f 100644 --- a/sound/core/seq/oss/seq_oss_synth.c +++ b/sound/core/seq/oss/seq_oss_synth.c @@ -624,8 +624,7 @@ snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_in if (info->is_midi) { struct midi_info minf; - if (snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf)) - return -ENXIO; + snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf); inf->synth_type = SYNTH_TYPE_MIDI; inf->synth_subtype = 0; inf->nr_voices = 16; diff --git 
a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c index 5d16b2079119..e40a2cba5002 100644 --- a/sound/core/seq/seq_device.c +++ b/sound/core/seq/seq_device.c @@ -162,8 +162,6 @@ static int snd_seq_device_dev_free(struct snd_device *device) struct snd_seq_device *dev = device->device_data; cancel_autoload_drivers(); - if (dev->private_free) - dev->private_free(dev); put_device(&dev->dev); return 0; } @@ -191,7 +189,11 @@ static int snd_seq_device_dev_disconnect(struct snd_device *device) static void snd_seq_dev_release(struct device *dev) { - kfree(to_seq_dev(dev)); + struct snd_seq_device *sdev = to_seq_dev(dev); + + if (sdev->private_free) + sdev->private_free(sdev); + kfree(sdev); } /* diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c index 8f694438ecdd..a42e2ce4a726 100644 --- a/sound/core/seq/seq_ports.c +++ b/sound/core/seq/seq_ports.c @@ -532,11 +532,10 @@ static int check_and_subscribe_port(struct snd_seq_client *client, return err; } -/* called with grp->list_mutex held */ -static void __delete_and_unsubscribe_port(struct snd_seq_client *client, - struct snd_seq_client_port *port, - struct snd_seq_subscribers *subs, - bool is_src, bool ack) +static void delete_and_unsubscribe_port(struct snd_seq_client *client, + struct snd_seq_client_port *port, + struct snd_seq_subscribers *subs, + bool is_src, bool ack) { struct snd_seq_port_subs_info *grp; struct list_head *list; @@ -544,6 +543,7 @@ static void __delete_and_unsubscribe_port(struct snd_seq_client *client, grp = is_src ? &port->c_src : &port->c_dest; list = is_src ? 
&subs->src_list : &subs->dest_list; + down_write(&grp->list_mutex); write_lock_irq(&grp->list_lock); empty = list_empty(list); if (!empty) @@ -553,18 +553,6 @@ static void __delete_and_unsubscribe_port(struct snd_seq_client *client, if (!empty) unsubscribe_port(client, port, grp, &subs->info, ack); -} - -static void delete_and_unsubscribe_port(struct snd_seq_client *client, - struct snd_seq_client_port *port, - struct snd_seq_subscribers *subs, - bool is_src, bool ack) -{ - struct snd_seq_port_subs_info *grp; - - grp = is_src ? &port->c_src : &port->c_dest; - down_write(&grp->list_mutex); - __delete_and_unsubscribe_port(client, port, subs, is_src, ack); up_write(&grp->list_mutex); } @@ -620,30 +608,27 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector, struct snd_seq_client_port *dest_port, struct snd_seq_port_subscribe *info) { - struct snd_seq_port_subs_info *dest = &dest_port->c_dest; + struct snd_seq_port_subs_info *src = &src_port->c_src; struct snd_seq_subscribers *subs; int err = -ENOENT; - /* always start from deleting the dest port for avoiding concurrent - * deletions - */ - down_write(&dest->list_mutex); + down_write(&src->list_mutex); /* look for the connection */ - list_for_each_entry(subs, &dest->list_head, dest_list) { + list_for_each_entry(subs, &src->list_head, src_list) { if (match_subs_info(info, &subs->info)) { - __delete_and_unsubscribe_port(dest_client, dest_port, - subs, false, - connector->number != dest_client->number); + atomic_dec(&subs->ref_count); /* mark as not ready */ err = 0; break; } } - up_write(&dest->list_mutex); + up_write(&src->list_mutex); if (err < 0) return err; delete_and_unsubscribe_port(src_client, src_port, subs, true, connector->number != src_client->number); + delete_and_unsubscribe_port(dest_client, dest_port, subs, false, + connector->number != dest_client->number); kfree(subs); return 0; } diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c index b923059a2227..ea1aa0796276 100644 --- 
a/sound/core/seq/seq_queue.c +++ b/sound/core/seq/seq_queue.c @@ -257,15 +257,12 @@ struct snd_seq_queue *snd_seq_queue_find_name(char *name) /* -------------------------------------------------------- */ -#define MAX_CELL_PROCESSES_IN_QUEUE 1000 - void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) { unsigned long flags; struct snd_seq_event_cell *cell; snd_seq_tick_time_t cur_tick; snd_seq_real_time_t cur_time; - int processed = 0; if (q == NULL) return; @@ -288,8 +285,6 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) if (!cell) break; snd_seq_dispatch_event(cell, atomic, hop); - if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE) - goto out; /* the rest processed at the next batch */ } /* Process time queue... */ @@ -299,19 +294,14 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) if (!cell) break; snd_seq_dispatch_event(cell, atomic, hop); - if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE) - goto out; /* the rest processed at the next batch */ } - out: /* free lock */ spin_lock_irqsave(&q->check_lock, flags); if (q->check_again) { q->check_again = 0; - if (processed < MAX_CELL_PROCESSES_IN_QUEUE) { - spin_unlock_irqrestore(&q->check_lock, flags); - goto __again; - } + spin_unlock_irqrestore(&q->check_lock, flags); + goto __again; } q->check_blocked = 0; spin_unlock_irqrestore(&q->check_lock, flags); diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h index 7909cf6040e3..719093489a2c 100644 --- a/sound/core/seq/seq_queue.h +++ b/sound/core/seq/seq_queue.h @@ -40,10 +40,10 @@ struct snd_seq_queue { struct snd_seq_timer *timer; /* time keeper for this queue */ int owner; /* client that 'owns' the timer */ - bool locked; /* timer is only accesibble by owner if set */ - bool klocked; /* kernel lock (after START) */ - bool check_again; /* concurrent access happened during check */ - bool check_blocked; /* queue being checked */ + unsigned int locked:1, /* timer is only accesibble by 
owner if set */ + klocked:1, /* kernel lock (after START) */ + check_again:1, + check_blocked:1; unsigned int flags; /* status flags */ unsigned int info_flags; /* info for sync */ diff --git a/sound/core/timer.c b/sound/core/timer.c index f1845db5110f..94c563c2c1dc 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -432,10 +432,9 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event) return; if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) return; - event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */ list_for_each_entry(ts, &ti->slave_active_head, active_list) if (ts->ccallback) - ts->ccallback(ts, event, &tstamp, resolution); + ts->ccallback(ts, event + 100, &tstamp, resolution); } /* start/continue a master timer */ @@ -525,13 +524,13 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop) if (!timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); - list_del_init(&timeri->ack_list); - list_del_init(&timeri->active_list); if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START))) { result = -EBUSY; goto unlock; } + list_del_init(&timeri->ack_list); + list_del_init(&timeri->active_list); if (timer->card && timer->card->shutdown) goto unlock; if (stop) { @@ -566,22 +565,23 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop) static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop) { unsigned long flags; - bool running; spin_lock_irqsave(&slave_active_lock, flags); - running = timeri->flags & SNDRV_TIMER_IFLG_RUNNING; + if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) { + spin_unlock_irqrestore(&slave_active_lock, flags); + return -EBUSY; + } timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING; if (timeri->timer) { spin_lock(&timeri->timer->lock); list_del_init(&timeri->ack_list); list_del_init(&timeri->active_list); - if (running) - snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : - SNDRV_TIMER_EVENT_PAUSE); + snd_timer_notify1(timeri, stop ? 
SNDRV_TIMER_EVENT_STOP : + SNDRV_TIMER_EVENT_PAUSE); spin_unlock(&timeri->timer->lock); } spin_unlock_irqrestore(&slave_active_lock, flags); - return running ? 0 : -EBUSY; + return 0; } /* diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index cc600aa0f6c7..847f70348d4d 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -1062,14 +1062,6 @@ static int loopback_mixer_new(struct loopback *loopback, int notify) return -ENOMEM; kctl->id.device = dev; kctl->id.subdevice = substr; - - /* Add the control before copying the id so that - * the numid field of the id is set in the copy. - */ - err = snd_ctl_add(card, kctl); - if (err < 0) - return err; - switch (idx) { case ACTIVE_IDX: setup->active_id = kctl->id; @@ -1086,6 +1078,9 @@ static int loopback_mixer_new(struct loopback *loopback, int notify) default: break; } + err = snd_ctl_add(card, kctl); + if (err < 0) + return err; } } } diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c index ff67c4b67e26..7821b07415a7 100644 --- a/sound/drivers/opl3/opl3_midi.c +++ b/sound/drivers/opl3/opl3_midi.c @@ -415,7 +415,7 @@ void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan) } if (instr_4op) { vp2 = &opl3->voices[voice + 3]; - if (vp2->state > 0) { + if (vp->state > 0) { opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset + 3); reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT; diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig index 850315d1abca..e92a6d949847 100644 --- a/sound/firewire/Kconfig +++ b/sound/firewire/Kconfig @@ -36,7 +36,7 @@ config SND_OXFW * Mackie(Loud) Onyx-i series (former models) * Mackie(Loud) Onyx Satellite * Mackie(Loud) Tapco Link.Firewire - * Mackie(Loud) d.2 pro/d.4 pro (built-in FireWire card with OXFW971 ASIC) + * Mackie(Loud) d.2 pro/d.4 pro * Mackie(Loud) U.420/U.420d * TASCAM FireOne @@ -91,7 +91,7 @@ config SND_BEBOB * PreSonus FIREBOX/FIREPOD/FP10/Inspire1394 * BridgeCo RDAudio1/Audio5 * Mackie Onyx 
1220/1620/1640 (FireWire I/O Card) - * Mackie d.2 (optional FireWire card with DM1000 ASIC) + * Mackie d.2 (FireWire Option) * Stanton FinalScratch 2 (ScratchAmp) * Tascam IF-FW/DM * Behringer XENIX UFX 1204/1604 @@ -117,7 +117,6 @@ config SND_BEBOB * M-Audio Ozonic/NRV10/ProfireLightBridge * M-Audio FireWire 1814/ProjectMix IO * Digidesign Mbox 2 Pro - * ToneWeal FW66 To compile this driver as a module, choose M here: the module will be called snd-bebob. diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c index c3c14e383e73..3a0361458597 100644 --- a/sound/firewire/bebob/bebob.c +++ b/sound/firewire/bebob/bebob.c @@ -60,7 +60,6 @@ static DECLARE_BITMAP(devices_used, SNDRV_CARDS); #define VEN_MAUDIO1 0x00000d6c #define VEN_MAUDIO2 0x000007f5 #define VEN_DIGIDESIGN 0x00a07e -#define OUI_SHOUYO 0x002327 #define MODEL_FOCUSRITE_SAFFIRE_BOTH 0x00000000 #define MODEL_MAUDIO_AUDIOPHILE_BOTH 0x00010060 @@ -363,7 +362,7 @@ static const struct ieee1394_device_id bebob_id_table[] = { SND_BEBOB_DEV_ENTRY(VEN_BRIDGECO, 0x00010049, &spec_normal), /* Mackie, Onyx 1220/1620/1640 (Firewire I/O Card) */ SND_BEBOB_DEV_ENTRY(VEN_MACKIE2, 0x00010065, &spec_normal), - // Mackie, d.2 (optional Firewire card with DM1000). + /* Mackie, d.2 (Firewire Option) */ SND_BEBOB_DEV_ENTRY(VEN_MACKIE1, 0x00010067, &spec_normal), /* Stanton, ScratchAmp */ SND_BEBOB_DEV_ENTRY(VEN_STANTON, 0x00000001, &spec_normal), @@ -462,8 +461,6 @@ static const struct ieee1394_device_id bebob_id_table[] = { &maudio_special_spec), /* Digidesign Mbox 2 Pro */ SND_BEBOB_DEV_ENTRY(VEN_DIGIDESIGN, 0x0000a9, &spec_normal), - // Toneweal FW66. 
- SND_BEBOB_DEV_ENTRY(OUI_SHOUYO, 0x020002, &spec_normal), /* IDs are unknown but able to be supported */ /* Apogee, Mini-ME Firewire */ /* Apogee, Mini-DAC Firewire */ diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c index c700e11ab327..588b93f20c2e 100644 --- a/sound/firewire/oxfw/oxfw.c +++ b/sound/firewire/oxfw/oxfw.c @@ -320,7 +320,8 @@ static const struct ieee1394_device_id oxfw_id_table[] = { * Onyx-i series (former models): 0x081216 * Mackie Onyx Satellite: 0x00200f * Tapco LINK.firewire 4x6: 0x000460 - * d.2 pro/d.4 pro (built-in card): Unknown + * d.2 pro: Unknown + * d.4 pro: Unknown * U.420: Unknown * U.420d: Unknown */ diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c index 4ee3458ad810..4727f5b80e76 100644 --- a/sound/hda/hdac_controller.c +++ b/sound/hda/hdac_controller.c @@ -288,9 +288,8 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset) if (!full_reset) goto skip_reset; - /* clear STATESTS if not in reset */ - if (snd_hdac_chip_readb(bus, GCTL) & AZX_GCTL_RESET) - snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK); + /* clear STATESTS */ + snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK); /* reset controller */ snd_hdac_bus_enter_link_reset(bus); diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c index 463906882b95..dfedfd85f205 100644 --- a/sound/isa/cmi8330.c +++ b/sound/isa/cmi8330.c @@ -564,7 +564,7 @@ static int snd_cmi8330_probe(struct snd_card *card, int dev) } if (acard->sb->hardware != SB_HW_16) { snd_printk(KERN_ERR PFX "SB16 not found during probe\n"); - return -ENODEV; + return err; } snd_wss_out(acard->wss, CS4231_MISC_INFO, 0x40); /* switch on MODE2 */ diff --git a/sound/isa/gus/gus_dma.c b/sound/isa/gus/gus_dma.c index 2e27cd3427c8..36c27c832360 100644 --- a/sound/isa/gus/gus_dma.c +++ b/sound/isa/gus/gus_dma.c @@ -141,8 +141,6 @@ static void snd_gf1_dma_interrupt(struct snd_gus_card * gus) } block = snd_gf1_dma_next_block(gus); spin_unlock(&gus->dma_lock); - if 
(!block) - return; snd_gf1_dma_program(gus, block->addr, block->buf_addr, block->count, (unsigned short) block->cmd); kfree(block); #if 0 diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c index 470058e89fef..94c411299e5a 100644 --- a/sound/isa/sb/emu8000.c +++ b/sound/isa/sb/emu8000.c @@ -1042,10 +1042,8 @@ snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu) memset(emu->controls, 0, sizeof(emu->controls)); for (i = 0; i < EMU8000_NUM_CONTROLS; i++) { - if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) { - emu->controls[i] = NULL; + if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) goto __error; - } } return 0; diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c index 23834691f4d3..48da2276683d 100644 --- a/sound/isa/sb/sb16_csp.c +++ b/sound/isa/sb/sb16_csp.c @@ -828,7 +828,6 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7); - spin_unlock_irqrestore(&p->chip->mixer_lock, flags); spin_lock(&p->chip->reg_lock); set_mode_register(p->chip, 0xc0); /* c0 = STOP */ @@ -868,7 +867,6 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel spin_unlock(&p->chip->reg_lock); /* restore PCM volume */ - spin_lock_irqsave(&p->chip->mixer_lock, flags); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR); spin_unlock_irqrestore(&p->chip->mixer_lock, flags); @@ -894,7 +892,6 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p) mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7); - spin_unlock_irqrestore(&p->chip->mixer_lock, flags); spin_lock(&p->chip->reg_lock); if 
(p->running & SNDRV_SB_CSP_ST_QSOUND) { @@ -909,7 +906,6 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p) spin_unlock(&p->chip->reg_lock); /* restore PCM volume */ - spin_lock_irqsave(&p->chip->mixer_lock, flags); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL); snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR); spin_unlock_irqrestore(&p->chip->mixer_lock, flags); @@ -1063,14 +1059,10 @@ static int snd_sb_qsound_build(struct snd_sb_csp * p) spin_lock_init(&p->q_lock); - if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) { - p->qsound_switch = NULL; + if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) goto __error; - } - if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) { - p->qsound_space = NULL; + if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) goto __error; - } return 0; @@ -1090,14 +1082,10 @@ static void snd_sb_qsound_destroy(struct snd_sb_csp * p) card = p->chip->card; down_write(&card->controls_rwsem); - if (p->qsound_switch) { + if (p->qsound_switch) snd_ctl_remove(card, p->qsound_switch); - p->qsound_switch = NULL; - } - if (p->qsound_space) { + if (p->qsound_space) snd_ctl_remove(card, p->qsound_space); - p->qsound_space = NULL; - } up_write(&card->controls_rwsem); /* cancel pending transfer of QSound parameters */ diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c index b8e2391c33ff..0c7fe1418447 100644 --- a/sound/isa/sb/sb8.c +++ b/sound/isa/sb/sb8.c @@ -111,6 +111,10 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev) /* block the 0x388 port to avoid PnP conflicts */ acard->fm_res = request_region(0x388, 4, "SoundBlaster FM"); + if (!acard->fm_res) { + err = -EBUSY; + goto _err; + } if (port[dev] != SNDRV_AUTO_PORT) { if ((err = snd_sbdsp_create(card, port[dev], irq[dev], diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c index 
d32685ce6c05..5fcbb065d870 100644 --- a/sound/pci/ctxfi/ctamixer.c +++ b/sound/pci/ctxfi/ctamixer.c @@ -27,15 +27,16 @@ #define BLANK_SLOT 4094 -static void amixer_master(struct rsc *rsc) +static int amixer_master(struct rsc *rsc) { rsc->conj = 0; - rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0]; + return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0]; } -static void amixer_next_conj(struct rsc *rsc) +static int amixer_next_conj(struct rsc *rsc) { rsc->conj++; + return container_of(rsc, struct amixer, rsc)->idx[rsc->conj]; } static int amixer_index(const struct rsc *rsc) @@ -334,15 +335,16 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr) /* SUM resource management */ -static void sum_master(struct rsc *rsc) +static int sum_master(struct rsc *rsc) { rsc->conj = 0; - rsc->idx = container_of(rsc, struct sum, rsc)->idx[0]; + return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0]; } -static void sum_next_conj(struct rsc *rsc) +static int sum_next_conj(struct rsc *rsc) { rsc->conj++; + return container_of(rsc, struct sum, rsc)->idx[rsc->conj]; } static int sum_index(const struct rsc *rsc) diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c index df326b7663a2..7f089cb433e1 100644 --- a/sound/pci/ctxfi/ctdaio.c +++ b/sound/pci/ctxfi/ctdaio.c @@ -55,12 +55,12 @@ static struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = { [SPDIFIO] = {.left = 0x05, .right = 0x85}, }; -static void daio_master(struct rsc *rsc) +static int daio_master(struct rsc *rsc) { /* Actually, this is not the resource index of DAIO. * For DAO, it is the input mapper index. And, for DAI, * it is the output time-slot index. 
*/ - rsc->conj = rsc->idx; + return rsc->conj = rsc->idx; } static int daio_index(const struct rsc *rsc) @@ -68,19 +68,19 @@ static int daio_index(const struct rsc *rsc) return rsc->conj; } -static void daio_out_next_conj(struct rsc *rsc) +static int daio_out_next_conj(struct rsc *rsc) { - rsc->conj += 2; + return rsc->conj += 2; } -static void daio_in_next_conj_20k1(struct rsc *rsc) +static int daio_in_next_conj_20k1(struct rsc *rsc) { - rsc->conj += 0x200; + return rsc->conj += 0x200; } -static void daio_in_next_conj_20k2(struct rsc *rsc) +static int daio_in_next_conj_20k2(struct rsc *rsc) { - rsc->conj += 0x100; + return rsc->conj += 0x100; } static const struct rsc_ops daio_out_rsc_ops = { diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c index 5beb4a3d203b..d86678c2a957 100644 --- a/sound/pci/ctxfi/cthw20k2.c +++ b/sound/pci/ctxfi/cthw20k2.c @@ -995,7 +995,7 @@ static int daio_mgr_dao_init(void *blk, unsigned int idx, unsigned int conf) if (idx < 4) { /* S/PDIF output */ - switch ((conf & 0xf)) { + switch ((conf & 0x7)) { case 1: set_field(&ctl->txctl[idx], ATXCTL_NUC, 0); break; diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c index f610c32ae5ad..c5124c3c0fd1 100644 --- a/sound/pci/ctxfi/ctresource.c +++ b/sound/pci/ctxfi/ctresource.c @@ -113,17 +113,18 @@ static int audio_ring_slot(const struct rsc *rsc) return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type]; } -static void rsc_next_conj(struct rsc *rsc) +static int rsc_next_conj(struct rsc *rsc) { unsigned int i; for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); ) i++; rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i); + return rsc->conj; } -static void rsc_master(struct rsc *rsc) +static int rsc_master(struct rsc *rsc) { - rsc->conj = rsc->idx; + return rsc->conj = rsc->idx; } static const struct rsc_ops rsc_generic_ops = { diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h index 29b6fe6de659..736d9f7e9e16 100644 --- 
a/sound/pci/ctxfi/ctresource.h +++ b/sound/pci/ctxfi/ctresource.h @@ -43,8 +43,8 @@ struct rsc { }; struct rsc_ops { - void (*master)(struct rsc *rsc); /* Move to master resource */ - void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */ + int (*master)(struct rsc *rsc); /* Move to master resource */ + int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */ int (*index)(const struct rsc *rsc); /* Return the index of resource */ /* Return the output slot number */ int (*output_slot)(const struct rsc *rsc); diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c index 234a7e96fd08..a5a72df29801 100644 --- a/sound/pci/ctxfi/ctsrc.c +++ b/sound/pci/ctxfi/ctsrc.c @@ -594,15 +594,16 @@ int src_mgr_destroy(struct src_mgr *src_mgr) /* SRCIMP resource manager operations */ -static void srcimp_master(struct rsc *rsc) +static int srcimp_master(struct rsc *rsc) { rsc->conj = 0; - rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0]; + return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0]; } -static void srcimp_next_conj(struct rsc *rsc) +static int srcimp_next_conj(struct rsc *rsc) { rsc->conj++; + return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj]; } static int srcimp_index(const struct rsc *rsc) diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c index f25c2c43c562..d0d6dfbfcfdf 100644 --- a/sound/pci/hda/hda_bind.c +++ b/sound/pci/hda/hda_bind.c @@ -46,10 +46,6 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev) if (codec->bus->shutdown) return; - /* ignore unsol events during system suspend/resume */ - if (codec->core.dev.power.power_state.event != PM_EVENT_ON) - return; - if (codec->patch_ops.unsol_event) codec->patch_ops.unsol_event(codec, ev); } diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 7533f8860c57..4962a9d8a572 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -1608,11 +1608,8 @@ void 
snd_hda_ctls_clear(struct hda_codec *codec) { int i; struct hda_nid_item *items = codec->mixers.list; - - down_write(&codec->card->controls_rwsem); for (i = 0; i < codec->mixers.used; i++) snd_ctl_remove(codec->card, items[i].kctl); - up_write(&codec->card->controls_rwsem); snd_array_free(&codec->mixers); snd_array_free(&codec->nids); } diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 5c708d9851b8..7cd1047a4edf 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -1182,17 +1182,11 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch, *index = ch; return "Headphone"; case AUTO_PIN_LINE_OUT: - /* This deals with the case where one HP or one Speaker or - * one HP + one Speaker need to share the DAC with LO - */ - if (!ch) { - bool hp_lo_shared = false, spk_lo_shared = false; - - if (cfg->speaker_outs) - spk_lo_shared = !path_has_mixer(codec, - spec->speaker_paths[0], ctl_type); - if (cfg->hp_outs) - hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type); + /* This deals with the case where we have two DACs and + * one LO, one HP and one Speaker */ + if (!ch && cfg->speaker_outs && cfg->hp_outs) { + bool hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type); + bool spk_lo_shared = !path_has_mixer(codec, spec->speaker_paths[0], ctl_type); if (hp_lo_shared && spk_lo_shared) return spec->vmaster_mute.hook ? 
"PCM" : "Master"; if (hp_lo_shared) @@ -1350,20 +1344,16 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs, struct nid_path *path; hda_nid_t pin = pins[i]; - if (!spec->obey_preferred_dacs) { - path = snd_hda_get_path_from_idx(codec, path_idx[i]); - if (path) { - badness += assign_out_path_ctls(codec, path); - continue; - } + path = snd_hda_get_path_from_idx(codec, path_idx[i]); + if (path) { + badness += assign_out_path_ctls(codec, path); + continue; } dacs[i] = get_preferred_dac(codec, pin); if (dacs[i]) { if (is_dac_already_used(codec, dacs[i])) badness += bad->shared_primary; - } else if (spec->obey_preferred_dacs) { - badness += BAD_NO_PRIMARY_DAC; } if (!dacs[i]) @@ -3442,7 +3432,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol, struct hda_gen_spec *spec = codec->spec; const struct hda_input_mux *imux; struct nid_path *path; - int i, adc_idx, ret, err = 0; + int i, adc_idx, err = 0; imux = &spec->input_mux; adc_idx = kcontrol->id.index; @@ -3452,13 +3442,9 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol, if (!path || !path->ctls[type]) continue; kcontrol->private_value = path->ctls[type]; - ret = func(kcontrol, ucontrol); - if (ret < 0) { - err = ret; + err = func(kcontrol, ucontrol); + if (err < 0) break; - } - if (ret > 0) - err = 1; } mutex_unlock(&codec->control_mutex); if (err >= 0 && spec->cap_sync_hook) diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h index 37cc6c8505ee..25f2397c29f7 100644 --- a/sound/pci/hda/hda_generic.h +++ b/sound/pci/hda/hda_generic.h @@ -229,7 +229,6 @@ struct hda_gen_spec { unsigned int add_jack_modes:1; /* add i/o jack mode enum ctls */ unsigned int power_down_unused:1; /* power down unused widgets */ unsigned int dac_min_mute:1; /* minimal = mute for DACs */ - unsigned int obey_preferred_dacs:1; /* obey preferred_dacs assignment */ /* other internal flags */ unsigned int no_analog:1; /* digital I/O only */ diff --git a/sound/pci/hda/hda_intel.c 
b/sound/pci/hda/hda_intel.c index 90e8fd71f708..8dd6cf0b8939 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1459,7 +1459,6 @@ static struct snd_pci_quirk probe_mask_list[] = { /* forced codec slots */ SND_PCI_QUIRK(0x1043, 0x1262, "ASUS W5Fm", 0x103), SND_PCI_QUIRK(0x1046, 0x1262, "ASUS W5F", 0x103), - SND_PCI_QUIRK(0x1558, 0x0351, "Schenker Dock 15", 0x105), /* WinFast VP200 H (Teradici) user reported broken communication */ SND_PCI_QUIRK(0x3a21, 0x040d, "WinFast VP200 H", 0x101), {} @@ -1625,6 +1624,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, assign_position_fix(chip, check_position_fix(chip, position_fix[dev])); + check_probe_mask(chip, dev); + chip->single_cmd = single_cmd; azx_check_snoop_available(chip); @@ -1653,8 +1654,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, chip->bus.needs_damn_long_delay = 1; } - check_probe_mask(chip, dev); - err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) { dev_err(card->dev, "Error creating device [card]!\n"); diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c index 89359a962e47..039fbbb1e53c 100644 --- a/sound/pci/hda/hda_tegra.c +++ b/sound/pci/hda/hda_tegra.c @@ -363,9 +363,6 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev) unsigned short gcap; int irq_id = platform_get_irq(pdev, 0); - if (irq_id < 0) - return irq_id; - err = hda_tegra_init_chip(chip, pdev); if (err) return err; diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 366e0386e296..c05119a3e13b 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -4443,10 +4443,11 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb) /* Delay enabling the HP amp, to let the mic-detection * state machine run. 
*/ + cancel_delayed_work(&spec->unsol_hp_work); + schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500)); tbl = snd_hda_jack_tbl_get(codec, cb->nid); if (tbl) tbl->block_report = 1; - schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500)); } static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb) @@ -4624,25 +4625,12 @@ static void ca0132_free(struct hda_codec *codec) kfree(codec->spec); } -#ifdef CONFIG_PM -static int ca0132_suspend(struct hda_codec *codec) -{ - struct ca0132_spec *spec = codec->spec; - - cancel_delayed_work_sync(&spec->unsol_hp_work); - return 0; -} -#endif - static struct hda_codec_ops ca0132_patch_ops = { .build_controls = ca0132_build_controls, .build_pcms = ca0132_build_pcms, .init = ca0132_init, .free = ca0132_free, .unsol_event = snd_hda_jack_unsol_event, -#ifdef CONFIG_PM - .suspend = ca0132_suspend, -#endif }; static void ca0132_config(struct hda_codec *codec) diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 4c5f70302524..3150ddfbdb25 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -1011,7 +1011,6 @@ static int patch_conexant_auto(struct hda_codec *codec) static const struct hda_device_id snd_hda_id_conexant[] = { HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto), - HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto), HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto), diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 785c64b854ec..b249b1b85746 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -2239,18 +2239,6 @@ static void generic_hdmi_free(struct hda_codec *codec) } #ifdef CONFIG_PM -static int generic_hdmi_suspend(struct hda_codec 
*codec) -{ - struct hdmi_spec *spec = codec->spec; - int pin_idx; - - for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) { - struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx); - cancel_delayed_work_sync(&per_pin->work); - } - return 0; -} - static int generic_hdmi_resume(struct hda_codec *codec) { struct hdmi_spec *spec = codec->spec; @@ -2274,7 +2262,6 @@ static const struct hda_codec_ops generic_hdmi_patch_ops = { .build_controls = generic_hdmi_build_controls, .unsol_event = hdmi_unsol_event, #ifdef CONFIG_PM - .suspend = generic_hdmi_suspend, .resume = generic_hdmi_resume, #endif }; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 583c7dd9aa39..0d6de70b4e08 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -330,7 +330,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0225: case 0x10ec0233: case 0x10ec0235: + case 0x10ec0236: case 0x10ec0255: + case 0x10ec0256: case 0x10ec0282: case 0x10ec0283: case 0x10ec0286: @@ -340,11 +342,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0299: alc_update_coef_idx(codec, 0x10, 1<<9, 0); break; - case 0x10ec0236: - case 0x10ec0256: - alc_write_coef_idx(codec, 0x36, 0x5757); - alc_update_coef_idx(codec, 0x10, 1<<9, 0); - break; case 0x10ec0285: case 0x10ec0293: alc_update_coef_idx(codec, 0xa, 1<<13, 0); @@ -381,7 +378,6 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) alc_update_coef_idx(codec, 0x7, 1<<5, 0); break; case 0x10ec0892: - case 0x10ec0897: alc_update_coef_idx(codec, 0x7, 1<<5, 0); break; case 0x10ec0899: @@ -2226,13 +2222,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { ALC882_FIXUP_ACER_ASPIRE_8930G), SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G", ALC882_FIXUP_ACER_ASPIRE_8930G), - SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G", - ALC882_FIXUP_ACER_ASPIRE_4930G), - SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210), SND_PCI_QUIRK(0x1025, 
0x015e, "Acer Aspire 6930G", ALC882_FIXUP_ACER_ASPIRE_4930G), SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G", ALC882_FIXUP_ACER_ASPIRE_4930G), + SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G", + ALC882_FIXUP_ACER_ASPIRE_4930G), + SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210), SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G", ALC882_FIXUP_ACER_ASPIRE_4930G), SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE), @@ -2244,11 +2240,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS), SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3), - SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), - SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), /* All Apple entries are in codec SSIDs */ SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF), @@ -4294,7 +4290,6 @@ static void alc_update_headset_jack_cb(struct hda_codec *codec, struct alc_spec *spec = codec->spec; spec->current_headset_type = ALC_HEADSET_TYPE_UNKNOWN; snd_hda_gen_hp_automute(codec, jack); - alc_update_headset_mode(codec); } static void alc_probe_headset_mode(struct hda_codec *codec) @@ -4852,7 +4847,6 @@ enum { ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, - ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, ALC269_FIXUP_HEADSET_MODE, ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, 
ALC269_FIXUP_ASPIRE_HEADSET_MIC, @@ -5155,16 +5149,6 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC }, - [ALC269_FIXUP_DELL4_MIC_NO_PRESENCE] = { - .type = HDA_FIXUP_PINS, - .v.pins = (const struct hda_pintbl[]) { - { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ - { 0x1b, 0x01a1913d }, /* use as headphone mic, without its own jack detect */ - { } - }, - .chained = true, - .chain_id = ALC269_FIXUP_HEADSET_MODE - }, [ALC269_FIXUP_HEADSET_MODE] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mode, @@ -5764,12 +5748,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101), + SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2), SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX), - SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT), SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), @@ -6125,7 +6109,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0x90a60120}, {0x14, 0x90170110}, {0x21, 0x0321101f}), - SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", 
ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, + SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0xb7a60130}, {0x14, 0x90170110}, {0x21, 0x04211020}), @@ -6209,10 +6193,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x17, 0x90170110}, {0x1a, 0x03011020}, {0x21, 0x03211030}), - SND_HDA_PIN_QUIRK(0x10ec0299, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, - ALC225_STANDARD_PINS, - {0x12, 0xb7a60130}, - {0x17, 0x90170110}), {} }; @@ -6491,7 +6471,8 @@ static const struct snd_pci_quirk alc861_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP), SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F), SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT), - SND_PCI_QUIRK_VENDOR(0x1584, "Haier/Uniwill", ALC861_FIXUP_AMP_VREF_0F), + SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", ALC861_FIXUP_AMP_VREF_0F), + SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", ALC861_FIXUP_AMP_VREF_0F), SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", ALC861_FIXUP_FSC_AMILO_PI1505), {} }; @@ -7361,7 +7342,6 @@ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0888, "ALC888", patch_alc882), HDA_CODEC_ENTRY(0x10ec0889, "ALC889", patch_alc882), HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662), - HDA_CODEC_ENTRY(0x10ec0897, "ALC897", patch_alc662), HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882), HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882), {} /* terminator */ diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 9dd104c308e1..fc30d1e8aa76 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -135,7 +135,6 @@ static struct via_spec *via_new_spec(struct hda_codec *codec) spec->codec_type = VT1708S; spec->gen.indep_hp = 1; spec->gen.keep_eapd_on = 1; - spec->gen.dac_min_mute = 1; spec->gen.pcm_playback_hook = via_playback_pcm_hook; spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO; 
codec->power_save_node = 1; diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c index 4128c04fbfde..dd6c9e6a1d53 100644 --- a/sound/pci/rme9652/hdsp.c +++ b/sound/pci/rme9652/hdsp.c @@ -5314,8 +5314,7 @@ static int snd_hdsp_free(struct hdsp *hdsp) if (hdsp->port) pci_release_regions(hdsp->pci); - if (pci_is_enabled(hdsp->pci)) - pci_disable_device(hdsp->pci); + pci_disable_device(hdsp->pci); return 0; } diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c index f4b164f19d30..1a0c0d16a279 100644 --- a/sound/pci/rme9652/hdspm.c +++ b/sound/pci/rme9652/hdspm.c @@ -6912,8 +6912,7 @@ static int snd_hdspm_free(struct hdspm * hdspm) if (hdspm->port) pci_release_regions(hdspm->pci); - if (pci_is_enabled(hdspm->pci)) - pci_disable_device(hdspm->pci); + pci_disable_device(hdspm->pci); return 0; } diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c index e5611ee9f2ae..c253bdf92e36 100644 --- a/sound/pci/rme9652/rme9652.c +++ b/sound/pci/rme9652/rme9652.c @@ -1761,8 +1761,7 @@ static int snd_rme9652_free(struct snd_rme9652 *rme9652) if (rme9652->port) pci_release_regions(rme9652->pci); - if (pci_is_enabled(rme9652->pci)) - pci_disable_device(rme9652->pci); + pci_disable_device(rme9652->pci); return 0; } diff --git a/sound/ppc/powermac.c b/sound/ppc/powermac.c index 7c70ba5e2540..33c6be9fb388 100644 --- a/sound/ppc/powermac.c +++ b/sound/ppc/powermac.c @@ -90,11 +90,7 @@ static int snd_pmac_probe(struct platform_device *devptr) sprintf(card->shortname, "PowerMac %s", name_ext); sprintf(card->longname, "%s (Dev %d) Sub-frame %d", card->shortname, chip->device_id, chip->subframe); - err = snd_pmac_tumbler_init(chip); - if (err < 0) - goto __error; - err = snd_pmac_tumbler_post_init(); - if (err < 0) + if ( snd_pmac_tumbler_init(chip) < 0 || snd_pmac_tumbler_post_init() < 0) goto __error; break; case PMAC_AWACS: diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c index a22879ddda47..7cd5f769bb61 100644 --- 
a/sound/soc/codecs/cs42l56.c +++ b/sound/soc/codecs/cs42l56.c @@ -1269,7 +1269,6 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client, dev_err(&i2c_client->dev, "CS42L56 Device ID (%X). Expected %X\n", devid, CS42L56_DEVID); - ret = -EINVAL; goto err_enable; } alpha_rev = reg & CS42L56_AREV_MASK; @@ -1325,7 +1324,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client, ret = snd_soc_register_codec(&i2c_client->dev, &soc_codec_dev_cs42l56, &cs42l56_dai, 1); if (ret < 0) - goto err_enable; + return ret; return 0; diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c index 146099ec8570..af2ed774b552 100644 --- a/sound/soc/codecs/rt286.c +++ b/sound/soc/codecs/rt286.c @@ -174,9 +174,6 @@ static bool rt286_readable_register(struct device *dev, unsigned int reg) case RT286_PROC_COEF: case RT286_SET_AMP_GAIN_ADC_IN1: case RT286_SET_AMP_GAIN_ADC_IN2: - case RT286_SET_GPIO_MASK: - case RT286_SET_GPIO_DIRECTION: - case RT286_SET_GPIO_DATA: case RT286_SET_POWER(RT286_DAC_OUT1): case RT286_SET_POWER(RT286_DAC_OUT2): case RT286_SET_POWER(RT286_ADC_IN1): @@ -1120,11 +1117,12 @@ static const struct dmi_system_id force_combo_jack_table[] = { { } }; -static const struct dmi_system_id dmi_dell[] = { +static const struct dmi_system_id dmi_dell_dino[] = { { - .ident = "Dell", + .ident = "Dell Dino", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343") } }, { } @@ -1135,7 +1133,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c, { struct rt286_platform_data *pdata = dev_get_platdata(&i2c->dev); struct rt286_priv *rt286; - int i, ret, vendor_id; + int i, ret, val; rt286 = devm_kzalloc(&i2c->dev, sizeof(*rt286), GFP_KERNEL); @@ -1151,15 +1149,14 @@ static int rt286_i2c_probe(struct i2c_client *i2c, } ret = regmap_read(rt286->regmap, - RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &vendor_id); + RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val); if (ret != 0) { dev_err(&i2c->dev, "I2C error %d\n", ret); 
return ret; } - if (vendor_id != RT286_VENDOR_ID && vendor_id != RT288_VENDOR_ID) { + if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) { dev_err(&i2c->dev, - "Device with ID register %#x is not rt286\n", - vendor_id); + "Device with ID register %#x is not rt286\n", val); return -ENODEV; } @@ -1183,8 +1180,8 @@ static int rt286_i2c_probe(struct i2c_client *i2c, if (pdata) rt286->pdata = *pdata; - if ((vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) || - dmi_check_system(force_combo_jack_table)) + if (dmi_check_system(force_combo_jack_table) || + dmi_check_system(dmi_dell_dino)) rt286->pdata.cbj_en = true; regmap_write(rt286->regmap, RT286_SET_AUDIO_POWER, AC_PWRST_D3); @@ -1223,7 +1220,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c, regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL3, 0xf777, 0x4737); regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL4, 0x00ff, 0x003f); - if (vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) { + if (dmi_check_system(dmi_dell_dino)) { regmap_update_bits(rt286->regmap, RT286_SET_GPIO_MASK, 0x40, 0x40); regmap_update_bits(rt286->regmap, diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c index db7734e45dd1..b1c8bb39cdf1 100644 --- a/sound/soc/codecs/rt5640.c +++ b/sound/soc/codecs/rt5640.c @@ -341,9 +341,9 @@ static bool rt5640_readable_register(struct device *dev, unsigned int reg) } static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0); -static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0); +static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0); static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); -static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000); +static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0); static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */ diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c index 883b93f0bd38..1d4031818966 100644 --- 
a/sound/soc/codecs/rt5651.c +++ b/sound/soc/codecs/rt5651.c @@ -286,9 +286,9 @@ static bool rt5651_readable_register(struct device *dev, unsigned int reg) } static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0); -static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0); +static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0); static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); -static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000); +static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0); static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */ diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index 321b1ac52bfd..a3dd7030f629 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -78,7 +78,7 @@ static const struct reg_default sgtl5000_reg_defaults[] = { { SGTL5000_DAP_EQ_BASS_BAND4, 0x002f }, { SGTL5000_DAP_MAIN_CHAN, 0x8000 }, { SGTL5000_DAP_MIX_CHAN, 0x0000 }, - { SGTL5000_DAP_AVC_CTRL, 0x5100 }, + { SGTL5000_DAP_AVC_CTRL, 0x0510 }, { SGTL5000_DAP_AVC_THRESHOLD, 0x1473 }, { SGTL5000_DAP_AVC_ATTACK, 0x0028 }, { SGTL5000_DAP_AVC_DECAY, 0x0050 }, diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c index 71a1fde5a7ef..160d61a66204 100644 --- a/sound/soc/codecs/sti-sas.c +++ b/sound/soc/codecs/sti-sas.c @@ -542,7 +542,6 @@ static const struct of_device_id sti_sas_dev_match[] = { }, {}, }; -MODULE_DEVICE_TABLE(of, sti_sas_dev_match); static int sti_sas_driver_probe(struct platform_device *pdev) { diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 5ff0d3b10bcf..f1f990b325ad 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -852,7 +852,7 @@ static int wm_adsp_create_control(struct wm_adsp *dsp, ctl_work = kzalloc(sizeof(*ctl_work), GFP_KERNEL); if (!ctl_work) { ret = -ENOMEM; - goto err_list_del; + goto err_ctl_cache; } ctl_work->dsp = dsp; @@ -862,8 +862,7 @@ static 
int wm_adsp_create_control(struct wm_adsp *dsp, return 0; -err_list_del: - list_del(&ctl->list); +err_ctl_cache: kfree(ctl->cache); err_ctl_name: kfree(ctl->name); diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c index fad711a3f4b4..40075b9afb79 100644 --- a/sound/soc/fsl/fsl_esai.c +++ b/sound/soc/fsl/fsl_esai.c @@ -488,13 +488,11 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream, ESAI_SAICR_SYNC, esai_priv->synchronous ? ESAI_SAICR_SYNC : 0); - /* Set slots count */ + /* Set a default slot number -- 2 */ regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, - ESAI_xCCR_xDC_MASK, - ESAI_xCCR_xDC(esai_priv->slots)); + ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2)); regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, - ESAI_xCCR_xDC_MASK, - ESAI_xCCR_xDC(esai_priv->slots)); + ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2)); } return 0; diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c index 72d454899484..ec731223cab3 100644 --- a/sound/soc/fsl/pcm030-audio-fabric.c +++ b/sound/soc/fsl/pcm030-audio-fabric.c @@ -90,21 +90,16 @@ static int pcm030_fabric_probe(struct platform_device *op) dev_err(&op->dev, "platform_device_alloc() failed\n"); ret = platform_device_add(pdata->codec_device); - if (ret) { + if (ret) dev_err(&op->dev, "platform_device_add() failed: %d\n", ret); - platform_device_put(pdata->codec_device); - } ret = snd_soc_register_card(card); - if (ret) { + if (ret) dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret); - platform_device_del(pdata->codec_device); - platform_device_put(pdata->codec_device); - } platform_set_drvdata(op, pdata); - return ret; + return ret; } static int pcm030_fabric_remove(struct platform_device *op) diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index d0d338533eb6..edb244331e6e 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -134,7 +134,7 @@ static 
void sst_fill_alloc_params(struct snd_pcm_substream *substream, snd_pcm_uframes_t period_size; ssize_t periodbytes; ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream); - u32 buffer_addr = virt_to_phys(substream->runtime->dma_area); + u32 buffer_addr = virt_to_phys(substream->dma_buffer.area); channels = substream->runtime->channels; period_size = substream->runtime->period_size; @@ -240,6 +240,7 @@ static int sst_platform_alloc_stream(struct snd_pcm_substream *substream, /* set codec params and inform SST driver the same */ sst_fill_pcm_params(substream, ¶m); sst_fill_alloc_params(substream, &alloc_params); + substream->runtime->dma_area = substream->dma_buffer.area; str_params.sparams = param; str_params.aparams = alloc_params; str_params.codec = SST_CODEC_TYPE_PCM; diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c index a0e67d5f5968..de955c2e8c4e 100644 --- a/sound/soc/intel/boards/haswell.c +++ b/sound/soc/intel/boards/haswell.c @@ -197,7 +197,6 @@ static struct platform_driver haswell_audio = { .probe = haswell_audio_probe, .driver = { .name = "haswell-audio", - .pm = &snd_soc_pm_ops, }, }; diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c index 6440729facaf..0dc1ab48fceb 100644 --- a/sound/soc/jz4740/jz4740-i2s.c +++ b/sound/soc/jz4740/jz4740-i2s.c @@ -315,14 +315,10 @@ static int jz4740_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id, switch (clk_id) { case JZ4740_I2S_CLKSRC_EXT: parent = clk_get(NULL, "ext"); - if (IS_ERR(parent)) - return PTR_ERR(parent); clk_set_parent(i2s->clk_i2s, parent); break; case JZ4740_I2S_CLKSRC_PLL: parent = clk_get(NULL, "pll half"); - if (IS_ERR(parent)) - return PTR_ERR(parent); clk_set_parent(i2s->clk_i2s, parent); ret = clk_set_rate(i2s->clk_i2s, freq); break; diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c index c94a39081645..d0333f667f93 100644 --- a/sound/soc/msm/msm8998.c +++ b/sound/soc/msm/msm8998.c @@ -7583,33 +7583,6 @@ static struct 
snd_soc_dai_link msm_common_be_dai_links[] = { .be_hw_params_fixup = msm_be_hw_params_fixup, .ignore_suspend = 1, }, - /* Proxy Tx BACK END DAI Link */ - { - .name = LPASS_BE_PROXY_TX, - .stream_name = "Proxy Capture", - .cpu_dai_name = "msm-dai-q6-dev.8195", - .platform_name = "msm-pcm-routing", - .codec_name = "msm-stub-codec.1", - .codec_dai_name = "msm-stub-tx", - .no_pcm = 1, - .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_PROXY_TX, - .ignore_suspend = 1, - }, - /* Proxy Rx BACK END DAI Link */ - { - .name = LPASS_BE_PROXY_RX, - .stream_name = "Proxy Playback", - .cpu_dai_name = "msm-dai-q6-dev.8194", - .platform_name = "msm-pcm-routing", - .codec_name = "msm-stub-codec.1", - .codec_dai_name = "msm-stub-rx", - .no_pcm = 1, - .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_PROXY_RX, - .ignore_pmdown_time = 1, - .ignore_suspend = 1, - }, { .name = LPASS_BE_USB_AUDIO_RX, .stream_name = "USB Audio Playback", diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c index 6fa7aad66bd7..506a51f6d983 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -16415,9 +16415,9 @@ static int msm_routing_put_app_type_cfg_control(struct snd_kcontrol *kcontrol, memset(app_type_cfg, 0, MAX_APP_TYPES* sizeof(struct msm_pcm_routing_app_type_data)); - if (num_app_types > MAX_APP_TYPES || num_app_types < 0) { - pr_err("%s: number of app types %d is invalid\n", - __func__, num_app_types); + if (num_app_types > MAX_APP_TYPES) { + pr_err("%s: number of app types exceed the max supported\n", + __func__); return -EINVAL; } for (j = 0; j < num_app_types; j++) { @@ -16452,10 +16452,9 @@ static int msm_routing_put_lsm_app_type_cfg_control( int i = 0, j; int num_app_types; - if (ucontrol->value.integer.value[0] < 0 || - ucontrol->value.integer.value[0] > MAX_APP_TYPES) { - pr_err("%s: number of app types %ld is invalid\n", - __func__, ucontrol->value.integer.value[0]); + if (ucontrol->value.integer.value[0] > MAX_APP_TYPES) { + pr_err("%s: number of app types exceed the max supported\n", + __func__); return -EINVAL; } diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c index d9cd9350ffbe..4ed29ffc1c54 100644 --- a/sound/soc/samsung/idma.c +++ b/sound/soc/samsung/idma.c @@ -370,8 +370,6 @@ static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream) buf->addr = idma.lp_tx_addr; buf->bytes = idma_hardware.buffer_bytes_max; buf->area = (unsigned char * __force)ioremap(buf->addr, buf->bytes); - if (!buf->area) - return -ENOMEM; return 0; } diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 388a1edeec4c..275df1d7a138 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -3423,7 +3423,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, if (!routes) { dev_err(card->dev, "ASoC: Could not allocate DAPM route table\n"); - return -ENOMEM; + return -EINVAL; } for (i = 0; i < num_routes; i++) { diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c 
index 483187579399..eea6585925f3 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -2332,7 +2332,6 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w) enum snd_soc_dapm_direction dir; list_del(&w->list); - list_del(&w->dirty); /* * remove source and sink paths associated to this widget. * While removing the path, remove reference to it from both @@ -2389,16 +2388,10 @@ static struct snd_soc_dapm_widget *dapm_find_widget( return NULL; } -/* - * set the DAPM pin status: - * returns 1 when the value has been updated, 0 when unchanged, or a negative - * error code; called from kcontrol put callback - */ -static int __snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm, - const char *pin, int status) +static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm, + const char *pin, int status) { struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true); - int ret = 0; dapm_assert_locked(dapm); @@ -2411,26 +2404,13 @@ static int __snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm, dapm_mark_dirty(w, "pin configuration"); dapm_widget_invalidate_input_paths(w); dapm_widget_invalidate_output_paths(w); - ret = 1; } w->connected = status; if (status == 0) w->force = 0; - return ret; -} - -/* - * similar as __snd_soc_dapm_set_pin(), but returns 0 when successful; - * called from several API functions below - */ -static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm, - const char *pin, int status) -{ - int ret = __snd_soc_dapm_set_pin(dapm, pin, status); - - return ret < 0 ? 
ret : 0; + return 0; } /** @@ -3324,15 +3304,14 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol, { struct snd_soc_card *card = snd_kcontrol_chip(kcontrol); const char *pin = (const char *)kcontrol->private_value; - int ret; - mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); - ret = __snd_soc_dapm_set_pin(&card->dapm, pin, - !!ucontrol->value.integer.value[0]); - mutex_unlock(&card->dapm_mutex); + if (ucontrol->value.integer.value[0]) + snd_soc_dapm_enable_pin(&card->dapm, pin); + else + snd_soc_dapm_disable_pin(&card->dapm, pin); snd_soc_dapm_sync(&card->dapm); - return ret; + return 0; } EXPORT_SYMBOL_GPL(snd_soc_dapm_put_pin_switch); @@ -3692,7 +3671,7 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol, w->params_select = ucontrol->value.enumerated.item[0]; - return 1; + return 0; } int snd_soc_dapm_new_pcm(struct snd_soc_card *card, diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index 8e9a77bfa73a..793c4fd2c825 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -320,7 +320,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, unsigned int sign_bit = mc->sign_bit; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; - int err, ret; + int err; bool type_2r = false; unsigned int val2 = 0; unsigned int val, val_mask; @@ -328,27 +328,13 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, if (sign_bit) mask = BIT(sign_bit + 1) - 1; - val = ucontrol->value.integer.value[0]; - if (mc->platform_max && ((int)val + min) > mc->platform_max) - return -EINVAL; - if (val > max - min) - return -EINVAL; - if (val < 0) - return -EINVAL; - val = (val + min) & mask; + val = ((ucontrol->value.integer.value[0] + min) & mask); if (invert) val = max - val; val_mask = mask << shift; val = val << shift; if (snd_soc_volsw_is_stereo(mc)) { - val2 = ucontrol->value.integer.value[1]; - if (mc->platform_max && ((int)val2 + min) > mc->platform_max) - return -EINVAL; - if (val2 > max - min) - 
return -EINVAL; - if (val2 < 0) - return -EINVAL; - val2 = (val2 + min) & mask; + val2 = ((ucontrol->value.integer.value[1] + min) & mask); if (invert) val2 = max - val2; if (reg == reg2) { @@ -362,18 +348,12 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, err = snd_soc_component_update_bits(component, reg, val_mask, val); if (err < 0) return err; - ret = err; - if (type_2r) { + if (type_2r) err = snd_soc_component_update_bits(component, reg2, val_mask, - val2); - /* Don't discard any error code or drop change flag */ - if (ret == 0 || err < 0) { - ret = err; - } - } + val2); - return ret; + return err; } EXPORT_SYMBOL_GPL(snd_soc_put_volsw); @@ -448,15 +428,8 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol, int err = 0; unsigned int val, val_mask, val2 = 0; - val = ucontrol->value.integer.value[0]; - if (mc->platform_max && val > mc->platform_max) - return -EINVAL; - if (val > max - min) - return -EINVAL; - if (val < 0) - return -EINVAL; val_mask = mask << shift; - val = (val + min) & mask; + val = (ucontrol->value.integer.value[0] + min) & mask; val = val << shift; err = snd_soc_component_update_bits(component, reg, val_mask, val); @@ -529,7 +502,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; unsigned int val, val_mask; - int err, ret; + int ret; if (invert) val = (max - ucontrol->value.integer.value[0]) & mask; @@ -538,10 +511,9 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, val_mask = mask << shift; val = val << shift; - err = snd_soc_component_update_bits(component, reg, val_mask, val); - if (err < 0) - return err; - ret = err; + ret = snd_soc_component_update_bits(component, reg, val_mask, val); + if (ret < 0) + return ret; if (snd_soc_volsw_is_stereo(mc)) { if (invert) @@ -551,12 +523,8 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, val_mask = mask << shift; val = val << shift; - err = 
snd_soc_component_update_bits(component, rreg, val_mask, + ret = snd_soc_component_update_bits(component, rreg, val_mask, val); - /* Don't discard any error code or drop change flag */ - if (ret == 0 || err < 0) { - ret = err; - } } return ret; @@ -927,8 +895,6 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol, unsigned int i, regval, regmask; int err; - if (val < mc->min || val > mc->max) - return -EINVAL; if (invert) val = max - val; val &= mask; diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 5845a3d53010..91a9fd644166 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -2264,7 +2264,6 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - case SNDRV_PCM_TRIGGER_DRAIN: ret = dpcm_dai_trigger_fe_be(substream, cmd, true); break; case SNDRV_PCM_TRIGGER_STOP: @@ -2282,7 +2281,6 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: - case SNDRV_PCM_TRIGGER_DRAIN: ret = dpcm_dai_trigger_fe_be(substream, cmd, false); break; case SNDRV_PCM_TRIGGER_STOP: diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index ff12f2aa4e51..0675ab3fec6c 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -1831,7 +1831,6 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all); /* remove dynamic controls from the component driver */ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index) { - struct snd_card *card = comp->card->snd_card; struct snd_soc_dobj *dobj, *next_dobj; int pass = SOC_TPLG_PASS_END; @@ -1839,7 +1838,6 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index) while (pass >= SOC_TPLG_PASS_START) { /* remove mixer controls */ - down_write(&card->controls_rwsem); list_for_each_entry_safe(dobj, next_dobj, 
&comp->dobj_list, list) { @@ -1872,7 +1870,6 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index) break; } } - up_write(&card->controls_rwsem); pass--; } diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c index f40657da4db2..deb597f7c302 100644 --- a/sound/soc/tegra/tegra_alc5632.c +++ b/sound/soc/tegra/tegra_alc5632.c @@ -149,7 +149,6 @@ static struct snd_soc_dai_link tegra_alc5632_dai = { static struct snd_soc_card snd_soc_tegra_alc5632 = { .name = "tegra-alc5632", - .driver_name = "tegra", .owner = THIS_MODULE, .remove = tegra_alc5632_card_remove, .dai_link = &tegra_alc5632_dai, diff --git a/sound/soc/tegra/tegra_max98090.c b/sound/soc/tegra/tegra_max98090.c index f4f238924c76..902da36581d1 100644 --- a/sound/soc/tegra/tegra_max98090.c +++ b/sound/soc/tegra/tegra_max98090.c @@ -205,7 +205,6 @@ static struct snd_soc_dai_link tegra_max98090_dai = { static struct snd_soc_card snd_soc_tegra_max98090 = { .name = "tegra-max98090", - .driver_name = "tegra", .owner = THIS_MODULE, .remove = tegra_max98090_card_remove, .dai_link = &tegra_max98090_dai, diff --git a/sound/soc/tegra/tegra_rt5640.c b/sound/soc/tegra/tegra_rt5640.c index 1ff83d5835a3..773daecaa5e8 100644 --- a/sound/soc/tegra/tegra_rt5640.c +++ b/sound/soc/tegra/tegra_rt5640.c @@ -150,7 +150,6 @@ static struct snd_soc_dai_link tegra_rt5640_dai = { static struct snd_soc_card snd_soc_tegra_rt5640 = { .name = "tegra-rt5640", - .driver_name = "tegra", .owner = THIS_MODULE, .remove = tegra_rt5640_card_remove, .dai_link = &tegra_rt5640_dai, diff --git a/sound/soc/tegra/tegra_rt5677.c b/sound/soc/tegra/tegra_rt5677.c index 451e7254e87b..1470873ecde6 100644 --- a/sound/soc/tegra/tegra_rt5677.c +++ b/sound/soc/tegra/tegra_rt5677.c @@ -198,7 +198,6 @@ static struct snd_soc_dai_link tegra_rt5677_dai = { static struct snd_soc_card snd_soc_tegra_rt5677 = { .name = "tegra-rt5677", - .driver_name = "tegra", .owner = THIS_MODULE, .remove = tegra_rt5677_card_remove, 
.dai_link = &tegra_rt5677_dai, diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c index 633d26c4811b..f0cd01dbfc38 100644 --- a/sound/soc/tegra/tegra_wm8753.c +++ b/sound/soc/tegra/tegra_wm8753.c @@ -110,7 +110,6 @@ static struct snd_soc_dai_link tegra_wm8753_dai = { static struct snd_soc_card snd_soc_tegra_wm8753 = { .name = "tegra-wm8753", - .driver_name = "tegra", .owner = THIS_MODULE, .dai_link = &tegra_wm8753_dai, .num_links = 1, diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c index 09e07b321e9e..21604009bc1a 100644 --- a/sound/soc/tegra/tegra_wm8903.c +++ b/sound/soc/tegra/tegra_wm8903.c @@ -227,7 +227,6 @@ static struct snd_soc_dai_link tegra_wm8903_dai = { static struct snd_soc_card snd_soc_tegra_wm8903 = { .name = "tegra-wm8903", - .driver_name = "tegra", .owner = THIS_MODULE, .dai_link = &tegra_wm8903_dai, .num_links = 1, diff --git a/sound/soc/tegra/tegra_wm9712.c b/sound/soc/tegra/tegra_wm9712.c index e5bebb473d95..6492f8143ff1 100644 --- a/sound/soc/tegra/tegra_wm9712.c +++ b/sound/soc/tegra/tegra_wm9712.c @@ -59,7 +59,6 @@ static struct snd_soc_dai_link tegra_wm9712_dai = { static struct snd_soc_card snd_soc_tegra_wm9712 = { .name = "tegra-wm9712", - .driver_name = "tegra", .owner = THIS_MODULE, .dai_link = &tegra_wm9712_dai, .num_links = 1, diff --git a/sound/soc/tegra/trimslice.c b/sound/soc/tegra/trimslice.c index 90a770968f34..2cea203c4f5f 100644 --- a/sound/soc/tegra/trimslice.c +++ b/sound/soc/tegra/trimslice.c @@ -103,7 +103,6 @@ static struct snd_soc_dai_link trimslice_tlv320aic23_dai = { static struct snd_soc_card snd_soc_trimslice = { .name = "tegra-trimslice", - .driver_name = "tegra", .owner = THIS_MODULE, .dai_link = &trimslice_tlv320aic23_dai, .num_links = 1, diff --git a/sound/synth/emux/emux.c b/sound/synth/emux/emux.c index c5c6d360843a..9312cd8a6fdd 100644 --- a/sound/synth/emux/emux.c +++ b/sound/synth/emux/emux.c @@ -101,7 +101,7 @@ int snd_emux_register(struct snd_emux *emu, 
struct snd_card *card, int index, ch emu->name = kstrdup(name, GFP_KERNEL); emu->voices = kcalloc(emu->max_voices, sizeof(struct snd_emux_voice), GFP_KERNEL); - if (emu->name == NULL || emu->voices == NULL) + if (emu->voices == NULL) return -ENOMEM; /* create soundfont list */ diff --git a/sound/usb/6fire/comm.c b/sound/usb/6fire/comm.c index f29c115b9d56..161215d78d95 100644 --- a/sound/usb/6fire/comm.c +++ b/sound/usb/6fire/comm.c @@ -99,7 +99,7 @@ static int usb6fire_comm_send_buffer(u8 *buffer, struct usb_device *dev) int actual_len; ret = usb_interrupt_msg(dev, usb_sndintpipe(dev, COMM_EP), - buffer, buffer[1] + 2, &actual_len, 1000); + buffer, buffer[1] + 2, &actual_len, HZ); if (ret < 0) return ret; else if (actual_len != buffer[1] + 2) diff --git a/sound/usb/6fire/firmware.c b/sound/usb/6fire/firmware.c index 2809ba257fa4..62c25e74f0e5 100644 --- a/sound/usb/6fire/firmware.c +++ b/sound/usb/6fire/firmware.c @@ -166,7 +166,7 @@ static int usb6fire_fw_ezusb_write(struct usb_device *device, ret = usb_control_msg(device, usb_sndctrlpipe(device, 0), type, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - value, 0, data, len, 1000); + value, 0, data, len, HZ); if (ret < 0) return ret; else if (ret != len) @@ -179,7 +179,7 @@ static int usb6fire_fw_ezusb_read(struct usb_device *device, { int ret = usb_control_msg(device, usb_rcvctrlpipe(device, 0), type, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, - 0, data, len, 1000); + 0, data, len, HZ); if (ret < 0) return ret; else if (ret != len) @@ -194,7 +194,7 @@ static int usb6fire_fw_fpga_write(struct usb_device *device, int ret; ret = usb_bulk_msg(device, usb_sndbulkpipe(device, FPGA_EP), data, len, - &actual_len, 1000); + &actual_len, HZ); if (ret < 0) return ret; else if (actual_len != len) diff --git a/sound/usb/card.c b/sound/usb/card.c index f909da29ec08..8c731430249f 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -250,8 +250,9 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, 
int ctrlif, int int ctrlif, interface); return -EINVAL; } - return usb_driver_claim_interface(&usb_audio_driver, iface, - USB_AUDIO_IFACE_UNUSED); + usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); + + return 0; } if ((altsd->bInterfaceClass != USB_CLASS_AUDIO && @@ -271,8 +272,7 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int if (! snd_usb_parse_audio_interface(chip, interface)) { usb_set_interface(dev, interface, 0); /* reset the current interface */ - return usb_driver_claim_interface(&usb_audio_driver, iface, - USB_AUDIO_IFACE_UNUSED); + usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); } return 0; @@ -713,7 +713,7 @@ static void usb_audio_disconnect(struct usb_interface *intf) struct snd_card *card; struct list_head *p; - if (chip == USB_AUDIO_IFACE_UNUSED) + if (chip == (void *)-1L) return; card = chip->card; @@ -816,7 +816,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) struct usb_mixer_interface *mixer; struct list_head *p; - if (chip == USB_AUDIO_IFACE_UNUSED) + if (chip == (void *)-1L) return 0; if (!chip->num_suspended_intf++) { @@ -846,7 +846,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) struct list_head *p; int err = 0; - if (chip == USB_AUDIO_IFACE_UNUSED) + if (chip == (void *)-1L) return 0; atomic_inc(&chip->active); /* avoid autopm */ diff --git a/sound/usb/format.c b/sound/usb/format.c index 6e5d33dec07d..daff52e78317 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -53,8 +53,6 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, case UAC_VERSION_1: default: { struct uac_format_type_i_discrete_descriptor *fmt = _fmt; - if (format >= 64) - return 0; /* invalid format */ sample_width = fmt->bBitResolution; sample_bytes = fmt->bSubframeSize; format = 1 << format; @@ -221,11 +219,9 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof continue; /* C-Media 
CM6501 mislabels its 96 kHz altsetting */ /* Terratec Aureon 7.1 USB C-Media 6206, too */ - /* Ozone Z90 USB C-Media, too */ if (rate == 48000 && nr_rates == 1 && (chip->usb_id == USB_ID(0x0d8c, 0x0201) || chip->usb_id == USB_ID(0x0d8c, 0x0102) || - chip->usb_id == USB_ID(0x0d8c, 0x0078) || chip->usb_id == USB_ID(0x0ccd, 0x00b1)) && fp->altsetting == 5 && fp->maxpacksize == 392) rate = 96000; diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index 898fba45f1b6..ae2c35918002 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c @@ -101,7 +101,7 @@ static int line6_send_raw_message(struct usb_line6 *line6, const char *buffer, usb_sndintpipe(line6->usbdev, line6->properties->ep_ctrl_w), (char *)frag_buf, frag_size, - &partial, LINE6_TIMEOUT); + &partial, LINE6_TIMEOUT * HZ); if (retval) { dev_err(line6->ifcdev, @@ -321,7 +321,7 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data, ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, (datalen << 8) | 0x21, address, - NULL, 0, LINE6_TIMEOUT); + NULL, 0, LINE6_TIMEOUT * HZ); if (ret < 0) { dev_err(line6->ifcdev, "read request failed (error %d)\n", ret); @@ -336,7 +336,7 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x0012, 0x0000, len, 1, - LINE6_TIMEOUT); + LINE6_TIMEOUT * HZ); if (ret < 0) { dev_err(line6->ifcdev, "receive length failed (error %d)\n", ret); @@ -364,7 +364,7 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data, ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x0013, 0x0000, data, datalen, - LINE6_TIMEOUT); + LINE6_TIMEOUT * HZ); if (ret < 0) dev_err(line6->ifcdev, "read failed (error %d)\n", ret); @@ -396,7 +396,7 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data, ret = usb_control_msg(usbdev, 
usb_sndctrlpipe(usbdev, 0), 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x0022, address, data, datalen, - LINE6_TIMEOUT); + LINE6_TIMEOUT * HZ); if (ret < 0) { dev_err(line6->ifcdev, @@ -412,7 +412,7 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x0012, 0x0000, - status, 1, LINE6_TIMEOUT); + status, 1, LINE6_TIMEOUT * HZ); if (ret < 0) { dev_err(line6->ifcdev, diff --git a/sound/usb/line6/driver.h b/sound/usb/line6/driver.h index ad845d488d49..7da643e79e3b 100644 --- a/sound/usb/line6/driver.h +++ b/sound/usb/line6/driver.h @@ -24,7 +24,7 @@ #define LINE6_FALLBACK_INTERVAL 10 #define LINE6_FALLBACK_MAXPACKETSIZE 16 -#define LINE6_TIMEOUT 1000 +#define LINE6_TIMEOUT 1 #define LINE6_BUFSIZE_LISTEN 32 #define LINE6_MESSAGE_MAXLEN 256 diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c index 9cc512d7f3e1..5512b3d532e7 100644 --- a/sound/usb/line6/toneport.c +++ b/sound/usb/line6/toneport.c @@ -133,7 +133,7 @@ static int toneport_send_cmd(struct usb_device *usbdev, int cmd1, int cmd2) ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, - cmd1, cmd2, NULL, 0, LINE6_TIMEOUT); + cmd1, cmd2, NULL, 0, LINE6_TIMEOUT * HZ); if (ret < 0) { dev_err(&usbdev->dev, "send failed (error %d)\n", ret); diff --git a/sound/usb/midi.c b/sound/usb/midi.c index 2b8c56c6f2b7..934540042bc2 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -1865,12 +1865,6 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi, ms_ep = find_usb_ms_endpoint_descriptor(hostep); if (!ms_ep) continue; - if (ms_ep->bLength <= sizeof(*ms_ep)) - continue; - if (ms_ep->bNumEmbMIDIJack > 0x10) - continue; - if (ms_ep->bLength < sizeof(*ms_ep) + ms_ep->bNumEmbMIDIJack) - continue; if (usb_endpoint_dir_out(ep)) { if (endpoints[epidx].out_ep) { if (++epidx >= MIDI_MAX_ENDPOINTS) { @@ -2123,8 +2117,6 @@ static int 
snd_usbmidi_detect_roland(struct snd_usb_midi *umidi, cs_desc[1] == USB_DT_CS_INTERFACE && cs_desc[2] == 0xf1 && cs_desc[3] == 0x02) { - if (cs_desc[4] > 0x10 || cs_desc[5] > 0x10) - continue; endpoint->in_cables = (1 << cs_desc[4]) - 1; endpoint->out_cables = (1 << cs_desc[5]) - 1; return snd_usbmidi_detect_endpoints(umidi, endpoint, 1); diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c index 3ba5d0afa947..9581089c28c5 100644 --- a/sound/usb/misc/ua101.c +++ b/sound/usb/misc/ua101.c @@ -1032,7 +1032,7 @@ static int detect_usb_format(struct ua101 *ua) fmt_playback->bSubframeSize * ua->playback.channels; epd = &ua->intf[INTF_CAPTURE]->altsetting[1].endpoint[0].desc; - if (!usb_endpoint_is_isoc_in(epd) || usb_endpoint_maxp(epd) == 0) { + if (!usb_endpoint_is_isoc_in(epd)) { dev_err(&ua->dev->dev, "invalid capture endpoint\n"); return -ENXIO; } @@ -1040,7 +1040,7 @@ static int detect_usb_format(struct ua101 *ua) ua->capture.max_packet_bytes = le16_to_cpu(epd->wMaxPacketSize); epd = &ua->intf[INTF_PLAYBACK]->altsetting[1].endpoint[0].desc; - if (!usb_endpoint_is_isoc_out(epd) || usb_endpoint_maxp(epd) == 0) { + if (!usb_endpoint_is_isoc_out(epd)) { dev_err(&ua->dev->dev, "invalid playback endpoint\n"); return -ENXIO; } diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index a56eb871784b..65552875692c 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -324,7 +324,6 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, struct usb_host_interface *alts; struct usb_interface *iface; unsigned int ep; - unsigned int ifnum; /* Implicit feedback sync EPs consumers are always playback EPs */ if (subs->direction != SNDRV_PCM_STREAM_PLAYBACK) @@ -335,19 +334,34 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */ case USB_ID(0x22f0, 0x0006): /* Allen&Heath Qu-16 */ ep = 0x81; - ifnum = 3; - goto add_sync_ep_from_ifnum; + iface = usb_ifnum_to_if(dev, 3); + + if (!iface 
|| iface->num_altsetting == 0) + return -EINVAL; + + alts = &iface->altsetting[1]; + goto add_sync_ep; + break; case USB_ID(0x0763, 0x2080): /* M-Audio FastTrack Ultra */ case USB_ID(0x0763, 0x2081): ep = 0x81; - ifnum = 2; - goto add_sync_ep_from_ifnum; + iface = usb_ifnum_to_if(dev, 2); + + if (!iface || iface->num_altsetting == 0) + return -EINVAL; + + alts = &iface->altsetting[1]; + goto add_sync_ep; case USB_ID(0x1397, 0x0002): ep = 0x81; - ifnum = 1; - goto add_sync_ep_from_ifnum; - } + iface = usb_ifnum_to_if(dev, 1); + + if (!iface || iface->num_altsetting == 0) + return -EINVAL; + alts = &iface->altsetting[1]; + goto add_sync_ep; + } if (attr == USB_ENDPOINT_SYNC_ASYNC && altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC && altsd->bInterfaceProtocol == 2 && @@ -362,14 +376,6 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, /* No quirk */ return 0; -add_sync_ep_from_ifnum: - iface = usb_ifnum_to_if(dev, ifnum); - - if (!iface || iface->num_altsetting < 2) - return -EINVAL; - - alts = &iface->altsetting[1]; - add_sync_ep: subs->sync_endpoint = snd_usb_add_endpoint(subs->stream->chip, alts, ep, !subs->direction, diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 1904fc542025..a917b7e02d31 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -2479,16 +2479,6 @@ YAMAHA_DEVICE(0x7010, "UB99"), } }, -{ - USB_DEVICE_VENDOR_SPEC(0x0944, 0x0204), - .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { - .vendor_name = "KORG, Inc.", - /* .product_name = "ToneLab EX", */ - .ifnum = 3, - .type = QUIRK_MIDI_STANDARD_INTERFACE, - } -}, - /* AKAI devices */ { USB_DEVICE(0x09e8, 0x0062), @@ -3446,37 +3436,5 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), } } }, -{ - /* - * Sennheiser GSP670 - * Change order of interfaces loaded - */ - USB_DEVICE(0x1395, 0x0300), - .bInterfaceClass = USB_CLASS_PER_INTERFACE, - .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { - 
.ifnum = QUIRK_ANY_INTERFACE, - .type = QUIRK_COMPOSITE, - .data = &(const struct snd_usb_audio_quirk[]) { - // Communication - { - .ifnum = 3, - .type = QUIRK_AUDIO_STANDARD_INTERFACE - }, - // Recording - { - .ifnum = 4, - .type = QUIRK_AUDIO_STANDARD_INTERFACE - }, - // Main - { - .ifnum = 1, - .type = QUIRK_AUDIO_STANDARD_INTERFACE - }, - { - .ifnum = -1 - } - } - } -}, #undef USB_DEVICE_VENDOR_SPEC diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 7979a9e19c53..59529a9cab61 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -66,12 +66,8 @@ static int create_composite_quirk(struct snd_usb_audio *chip, if (!iface) continue; if (quirk->ifnum != probed_ifnum && - !usb_interface_claimed(iface)) { - err = usb_driver_claim_interface(driver, iface, - USB_AUDIO_IFACE_UNUSED); - if (err < 0) - return err; - } + !usb_interface_claimed(iface)) + usb_driver_claim_interface(driver, iface, (void *)-1L); } return 0; @@ -403,12 +399,8 @@ static int create_autodetect_quirks(struct snd_usb_audio *chip, continue; err = create_autodetect_quirk(chip, iface, driver); - if (err >= 0) { - err = usb_driver_claim_interface(driver, iface, - USB_AUDIO_IFACE_UNUSED); - if (err < 0) - return err; - } + if (err >= 0) + usb_driver_claim_interface(driver, iface, (void *)-1L); } return 0; @@ -1162,8 +1154,6 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */ case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */ case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */ - case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */ - case USB_ID(0x046d, 0x084c): /* Logitech ConferenceCam Connect */ return true; } return false; diff --git a/sound/usb/stream.c b/sound/usb/stream.c index f1186ba3958c..0204fe014a5b 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -193,16 +193,16 @@ static int usb_chmap_ctl_get(struct snd_kcontrol *kcontrol, struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); struct 
snd_usb_substream *subs = info->private_data; struct snd_pcm_chmap_elem *chmap = NULL; - int i = 0; + int i; + memset(ucontrol->value.integer.value, 0, + sizeof(ucontrol->value.integer.value)); if (subs->cur_audiofmt) chmap = subs->cur_audiofmt->chmap; if (chmap) { for (i = 0; i < chmap->channels; i++) ucontrol->value.integer.value[i] = chmap->map[i]; } - for (; i < subs->channels_max; i++) - ucontrol->value.integer.value[i] = 0; return 0; } diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index 2c70e0961e3d..ad8e09825866 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -66,8 +66,6 @@ struct snd_usb_audio { void (*disconnect_cb)(struct snd_usb_audio *chip); }; -#define USB_AUDIO_IFACE_UNUSED ((void *)-1L) - #define usb_audio_err(chip, fmt, args...) \ dev_err(&(chip)->dev->dev, fmt, ##args) #define usb_audio_warn(chip, fmt, args...) \ diff --git a/tools/arch/ia64/include/asm/barrier.h b/tools/arch/ia64/include/asm/barrier.h index 94ae4a333a35..e4422b4b634e 100644 --- a/tools/arch/ia64/include/asm/barrier.h +++ b/tools/arch/ia64/include/asm/barrier.h @@ -38,6 +38,9 @@ * sequential memory pages only. 
*/ +/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */ +#define ia64_mf() asm volatile ("mf" ::: "memory") + #define mb() ia64_mf() #define rmb() mb() #define wmb() mb() diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c index 661cca25ae5d..6ebfdee3e2c6 100644 --- a/tools/perf/tests/bpf.c +++ b/tools/perf/tests/bpf.c @@ -1,5 +1,4 @@ #include -#include #include #include #include @@ -177,7 +176,6 @@ static int __test__bpf(int idx) bpf_testcase_table[idx].target_func, bpf_testcase_table[idx].expect_result); out: - free(obj_buf); bpf__clear(); return ret; } diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c index bdef02599b4e..30c02181e78b 100644 --- a/tools/perf/tests/sample-parsing.c +++ b/tools/perf/tests/sample-parsing.c @@ -167,7 +167,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format) .data = {1, 211, 212, 213}, }; u64 regs[64]; - const u32 raw_data[] = {0x12345678, 0x0a0b0c0d, 0x11020304, 0x05060708, 0 }; + const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL}; const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL}; struct perf_sample sample = { .ip = 101, diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index 6a3e53716036..960de8951884 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -240,6 +240,10 @@ static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues, queue->set = true; queue->tid = buffer->tid; queue->cpu = buffer->cpu; + } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) { + pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n", + queue->cpu, queue->tid, buffer->cpu, buffer->tid); + return -EINVAL; } buffer->buffer_nr = queues->next_buffer_nr++; diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 28f9e88c65ba..c1944765533c 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ 
b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -1478,9 +1478,6 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder) break; case INTEL_PT_CYC: - intel_pt_calc_cyc_timestamp(decoder); - break; - case INTEL_PT_VMCS: case INTEL_PT_MNT: case INTEL_PT_PAD: diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index e86fd1b38448..2a51212d5e49 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -88,7 +88,8 @@ static inline bool replace_android_lib(const char *filename, char *newfilename) if (!strncmp(filename, "/system/lib/", 12)) { char *ndk, *app; const char *arch; - int ndk_length, app_length; + size_t ndk_length; + size_t app_length; ndk = getenv("NDK_ROOT"); app = getenv("APP_PLATFORM"); @@ -116,8 +117,8 @@ static inline bool replace_android_lib(const char *filename, char *newfilename) if (new_length > PATH_MAX) return false; snprintf(newfilename, new_length, - "%.*s/platforms/%.*s/arch-%s/usr/lib/%s", - ndk_length, ndk, app_length, app, arch, libname); + "%s/platforms/%s/arch-%s/usr/lib/%s", + ndk, app, arch, libname); return true; } diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c index a8865d1c3e81..4f2c1c255d81 100644 --- a/tools/perf/util/parse-regs-options.c +++ b/tools/perf/util/parse-regs-options.c @@ -40,7 +40,7 @@ parse_regs(const struct option *opt, const char *str, int unset) } fputc('\n', stderr); /* just printing available regs */ - goto error; + return -1; } for (r = sample_reg_masks; r->name; r++) { if (!strcasecmp(s, r->name)) diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c index 7476757680ed..e3b3b92e4458 100644 --- a/tools/perf/util/probe-file.c +++ b/tools/perf/util/probe-file.c @@ -318,10 +318,10 @@ int probe_file__del_events(int fd, struct strfilter *filter) ret = probe_file__get_events(fd, filter, namelist); if (ret < 0) - goto out; + return ret; ret = probe_file__del_strlist(fd, namelist); -out: strlist__delete(namelist); + return ret; 
} diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 4e6db9c08339..24bb782886eb 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1229,7 +1229,6 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset, if (event->header.size < hdr_sz || event->header.size > buf_sz) return -1; - buf += hdr_sz; rest = event->header.size - hdr_sz; if (readn(fd, buf, rest) != (ssize_t)rest) diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index d8fa6c72b7ca..50a93f5f13d6 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk @@ -1,10 +1,6 @@ # This mimics the top-level Makefile. We do it explicitly here so that this # Makefile can operate with or without the kbuild infrastructure. -ifneq ($(LLVM),) -CC := clang -else CC := $(CROSS_COMPILE)gcc -endif define RUN_TESTS @for TEST in $(TEST_PROGS); do \ diff --git a/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c index 87630d44fb4c..8341d7778d5e 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c @@ -50,6 +50,8 @@ static int no_handler_test(void) event_close(&event); + dump_ebb_state(); + /* The real test is that we never took an EBB at 0x0 */ return 0; diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh index d4652e295ff8..9399c4aeaa26 100755 --- a/tools/testing/selftests/zram/zram.sh +++ b/tools/testing/selftests/zram/zram.sh @@ -1,6 +1,9 @@ #!/bin/bash TCID="zram.sh" +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + . 
./zram_lib.sh run_zram () { @@ -14,4 +17,14 @@ echo "" check_prereqs -run_zram +# check zram module exists +MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko +if [ -f $MODULE_PATH ]; then + run_zram +elif [ -b /dev/zram0 ]; then + run_zram +else + echo "$TCID : No zram.ko module or /dev/zram0 device file not found" + echo "$TCID : CONFIG_ZRAM is not set" + exit $ksft_skip +fi diff --git a/tools/testing/selftests/zram/zram01.sh b/tools/testing/selftests/zram/zram01.sh index 8abc9965089d..b9566a6478a9 100755 --- a/tools/testing/selftests/zram/zram01.sh +++ b/tools/testing/selftests/zram/zram01.sh @@ -42,7 +42,9 @@ zram_algs="lzo" zram_fill_fs() { - for i in $(seq $dev_start $dev_end); do + local mem_free0=$(free -m | awk 'NR==2 {print $4}') + + for i in $(seq 0 $(($dev_num - 1))); do echo "fill zram$i..." local b=0 while [ true ]; do @@ -52,17 +54,29 @@ zram_fill_fs() b=$(($b + 1)) done echo "zram$i can be filled with '$b' KB" + done - local mem_used_total=`awk '{print $3}' "/sys/block/zram$i/mm_stat"` - local v=$((100 * 1024 * $b / $mem_used_total)) - if [ "$v" -lt 100 ]; then - echo "FAIL compression ratio: 0.$v:1" - ERR_CODE=-1 - return - fi + local mem_free1=$(free -m | awk 'NR==2 {print $4}') + local used_mem=$(($mem_free0 - $mem_free1)) - echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK" + local total_size=0 + for sm in $zram_sizes; do + local s=$(echo $sm | sed 's/M//') + total_size=$(($total_size + $s)) done + + echo "zram used ${used_mem}M, zram disk sizes ${total_size}M" + + local v=$((100 * $total_size / $used_mem)) + + if [ "$v" -lt 100 ]; then + echo "FAIL compression ratio: 0.$v:1" + ERR_CODE=-1 + zram_cleanup + return + fi + + echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK" } check_prereqs @@ -76,6 +90,7 @@ zram_mount zram_fill_fs zram_cleanup +zram_unload if [ $ERR_CODE -ne 0 ]; then echo "$TCID : [FAIL]" diff --git a/tools/testing/selftests/zram/zram02.sh 
b/tools/testing/selftests/zram/zram02.sh index 3768cfd2e5f8..74569b883737 100755 --- a/tools/testing/selftests/zram/zram02.sh +++ b/tools/testing/selftests/zram/zram02.sh @@ -45,6 +45,7 @@ zram_set_memlimit zram_makeswap zram_swapoff zram_cleanup +zram_unload if [ $ERR_CODE -ne 0 ]; then echo "$TCID : [FAIL]" diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh index 130d193cbd72..9e73a4fb9b0a 100755 --- a/tools/testing/selftests/zram/zram_lib.sh +++ b/tools/testing/selftests/zram/zram_lib.sh @@ -14,17 +14,12 @@ # Author: Alexey Kodanev # Modified: Naresh Kamboju +MODULE=0 dev_makeswap=-1 dev_mounted=-1 -dev_start=0 -dev_end=-1 -module_load=-1 -sys_control=-1 + # Kselftest framework requirement - SKIP code is 4. ksft_skip=4 -kernel_version=`uname -r | cut -d'.' -f1,2` -kernel_major=${kernel_version%.*} -kernel_minor=${kernel_version#*.} trap INT @@ -39,104 +34,68 @@ check_prereqs() fi } -kernel_gte() -{ - major=${1%.*} - minor=${1#*.} - - if [ $kernel_major -gt $major ]; then - return 0 - elif [[ $kernel_major -eq $major && $kernel_minor -ge $minor ]]; then - return 0 - fi - - return 1 -} - zram_cleanup() { echo "zram cleanup" local i= - for i in $(seq $dev_start $dev_makeswap); do + for i in $(seq 0 $dev_makeswap); do swapoff /dev/zram$i done - for i in $(seq $dev_start $dev_mounted); do + for i in $(seq 0 $dev_mounted); do umount /dev/zram$i done - for i in $(seq $dev_start $dev_end); do + for i in $(seq 0 $(($dev_num - 1))); do echo 1 > /sys/block/zram${i}/reset rm -rf zram$i done - if [ $sys_control -eq 1 ]; then - for i in $(seq $dev_start $dev_end); do - echo $i > /sys/class/zram-control/hot_remove - done - fi +} - if [ $module_load -eq 1 ]; then +zram_unload() +{ + if [ $MODULE -ne 0 ] ; then + echo "zram rmmod zram" rmmod zram > /dev/null 2>&1 fi } zram_load() { - echo "create '$dev_num' zram device(s)" - - # zram module loaded, new kernel - if [ -d "/sys/class/zram-control" ]; then - echo "zram modules already 
loaded, kernel supports" \ - "zram-control interface" - dev_start=$(ls /dev/zram* | wc -w) - dev_end=$(($dev_start + $dev_num - 1)) - sys_control=1 - - for i in $(seq $dev_start $dev_end); do - cat /sys/class/zram-control/hot_add > /dev/null - done - - echo "all zram devices (/dev/zram$dev_start~$dev_end" \ - "successfully created" - return 0 - fi + # check zram module exists + MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko + if [ -f $MODULE_PATH ]; then + MODULE=1 + echo "create '$dev_num' zram device(s)" + modprobe zram num_devices=$dev_num + if [ $? -ne 0 ]; then + echo "failed to insert zram module" + exit 1 + fi + + dev_num_created=$(ls /dev/zram* | wc -w) - # detect old kernel or built-in - modprobe zram num_devices=$dev_num - if [ ! -d "/sys/class/zram-control" ]; then - if grep -q '^zram' /proc/modules; then - rmmod zram > /dev/null 2>&1 - if [ $? -ne 0 ]; then - echo "zram module is being used on old kernel" \ - "without zram-control interface" - exit $ksft_skip - fi + if [ "$dev_num_created" -ne "$dev_num" ]; then + echo "unexpected num of devices: $dev_num_created" + ERR_CODE=-1 else - echo "test needs CONFIG_ZRAM=m on old kernel without" \ - "zram-control interface" - exit $ksft_skip + echo "zram load module successful" fi - modprobe zram num_devices=$dev_num + elif [ -b /dev/zram0 ]; then + echo "/dev/zram0 device file found: OK" + else + echo "ERROR: No zram.ko module or no /dev/zram0 device found" + echo "$TCID : CONFIG_ZRAM is not set" + exit 1 fi - - module_load=1 - dev_end=$(($dev_num - 1)) - echo "all zram devices (/dev/zram0~$dev_end) successfully created" } zram_max_streams() { echo "set max_comp_streams to zram device(s)" - kernel_gte 4.7 - if [ $? 
-eq 0 ]; then - echo "The device attribute max_comp_streams was"\ - "deprecated in 4.7" - return 0 - fi - - local i=$dev_start + local i=0 for max_s in $zram_max_streams; do local sys_path="/sys/block/zram${i}/max_comp_streams" echo $max_s > $sys_path || \ @@ -148,7 +107,7 @@ zram_max_streams() echo "FAIL can't set max_streams '$max_s', get $max_stream" i=$(($i + 1)) - echo "$sys_path = '$max_streams'" + echo "$sys_path = '$max_streams' ($i/$dev_num)" done echo "zram max streams: OK" @@ -158,16 +117,15 @@ zram_compress_alg() { echo "test that we can set compression algorithm" - local i=$dev_start - local algs=$(cat /sys/block/zram${i}/comp_algorithm) + local algs=$(cat /sys/block/zram0/comp_algorithm) echo "supported algs: $algs" - + local i=0 for alg in $zram_algs; do local sys_path="/sys/block/zram${i}/comp_algorithm" echo "$alg" > $sys_path || \ echo "FAIL can't set '$alg' to $sys_path" i=$(($i + 1)) - echo "$sys_path = '$alg'" + echo "$sys_path = '$alg' ($i/$dev_num)" done echo "zram set compression algorithm: OK" @@ -176,14 +134,14 @@ zram_compress_alg() zram_set_disksizes() { echo "set disk size to zram device(s)" - local i=$dev_start + local i=0 for ds in $zram_sizes; do local sys_path="/sys/block/zram${i}/disksize" echo "$ds" > $sys_path || \ echo "FAIL can't set '$ds' to $sys_path" i=$(($i + 1)) - echo "$sys_path = '$ds'" + echo "$sys_path = '$ds' ($i/$dev_num)" done echo "zram set disksizes: OK" @@ -193,14 +151,14 @@ zram_set_memlimit() { echo "set memory limit to zram device(s)" - local i=$dev_start + local i=0 for ds in $zram_mem_limits; do local sys_path="/sys/block/zram${i}/mem_limit" echo "$ds" > $sys_path || \ echo "FAIL can't set '$ds' to $sys_path" i=$(($i + 1)) - echo "$sys_path = '$ds'" + echo "$sys_path = '$ds' ($i/$dev_num)" done echo "zram set memory limit: OK" @@ -209,8 +167,8 @@ zram_set_memlimit() zram_makeswap() { echo "make swap with zram device(s)" - local i=$dev_start - for i in $(seq $dev_start $dev_end); do + local i=0 + for i in 
$(seq 0 $(($dev_num - 1))); do mkswap /dev/zram$i > err.log 2>&1 if [ $? -ne 0 ]; then cat err.log @@ -233,7 +191,7 @@ zram_makeswap() zram_swapoff() { local i= - for i in $(seq $dev_start $dev_end); do + for i in $(seq 0 $dev_makeswap); do swapoff /dev/zram$i > err.log 2>&1 if [ $? -ne 0 ]; then cat err.log @@ -247,7 +205,7 @@ zram_swapoff() zram_makefs() { - local i=$dev_start + local i=0 for fs in $zram_filesystems; do # if requested fs not supported default it to ext2 which mkfs.$fs > /dev/null 2>&1 || fs=ext2 @@ -266,7 +224,7 @@ zram_makefs() zram_mount() { local i=0 - for i in $(seq $dev_start $dev_end); do + for i in $(seq 0 $(($dev_num - 1))); do echo "mount /dev/zram$i" mkdir zram$i mount /dev/zram$i zram$i > /dev/null || \ diff --git a/tools/usb/testusb.c b/tools/usb/testusb.c index 18c895654e76..0692d99b6d8f 100644 --- a/tools/usb/testusb.c +++ b/tools/usb/testusb.c @@ -278,6 +278,12 @@ nomem: } entry->ifnum = ifnum; + + /* FIXME update USBDEVFS_CONNECTINFO so it tells about high speed etc */ + + fprintf(stderr, "%s speed\t%s\t%u\n", + speed(entry->speed), entry->name, entry->ifnum); + entry->next = testdevs; testdevs = entry; return 0; @@ -306,14 +312,6 @@ static void *handle_testdev (void *arg) return 0; } - status = ioctl(fd, USBDEVFS_GET_SPEED, NULL); - if (status < 0) - fprintf(stderr, "USBDEVFS_GET_SPEED failed %d\n", status); - else - dev->speed = status; - fprintf(stderr, "%s speed\t%s\t%u\n", - speed(dev->speed), dev->name, dev->ifnum); - restart: for (i = 0; i < TEST_CASES; i++) { if (dev->test != -1 && dev->test != i) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index b0326734a980..ba8e8840b94b 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -346,8 +346,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, */ kvm->mmu_notifier_count++; need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); + need_tlb_flush |= kvm->tlbs_dirty; /* we've to flush the tlb before the pages can be freed */ - if 
(need_tlb_flush || kvm->tlbs_dirty) + if (need_tlb_flush) kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); -- GitLab