diff --git a/Documentation/ABI/testing/dev-kmsg b/Documentation/ABI/testing/dev-kmsg index 1e6c28b1942b..f307506eb54c 100644 --- a/Documentation/ABI/testing/dev-kmsg +++ b/Documentation/ABI/testing/dev-kmsg @@ -56,11 +56,6 @@ Description: The /dev/kmsg character device node provides userspace access seek after the last record available at the time the last SYSLOG_ACTION_CLEAR was issued. - Due to the record nature of this interface with a "read all" - behavior and the specific positions each seek operation sets, - SEEK_CUR is not supported, returning -ESPIPE (invalid seek) to - errno whenever requested. - The output format consists of a prefix carrying the syslog prefix including priority and facility, the 64 bit message sequence number and the monotonic timestamp in microseconds, diff --git a/Documentation/ABI/testing/sysfs-bus-papr-pmem b/Documentation/ABI/testing/sysfs-bus-papr-pmem new file mode 100644 index 000000000000..5b10d036a8d4 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-papr-pmem @@ -0,0 +1,27 @@ +What: /sys/bus/nd/devices/nmemX/papr/flags +Date: Apr, 2020 +KernelVersion: v5.8 +Contact: linuxppc-dev , linux-nvdimm@lists.01.org, +Description: + (RO) Report flags indicating various states of a + papr-pmem NVDIMM device. Each flag maps to a one or + more bits set in the dimm-health-bitmap retrieved in + response to H_SCM_HEALTH hcall. The details of the bit + flags returned in response to this hcall is available + at 'Documentation/powerpc/papr_hcalls.rst' . Below are + the flags reported in this sysfs file: + + * "not_armed" : Indicates that NVDIMM contents will not + survive a power cycle. + * "flush_fail" : Indicates that NVDIMM contents + couldn't be flushed during last + shut-down event. + * "restore_fail": Indicates that NVDIMM contents + couldn't be restored during NVDIMM + initialization. + * "encrypted" : NVDIMM contents are encrypted. + * "smart_notify": There is health event for the NVDIMM. + * "scrubbed" : Indicating that contents of the + NVDIMM have been scrubbed. + * "locked" : Indicating that NVDIMM contents cant + be modified until next power cycle. diff --git a/Documentation/ABI/testing/sysfs-platform-chipidea-usb-otg b/Documentation/ABI/testing/sysfs-platform-chipidea-usb-otg index 151c59578db4..f58cfb06b160 100644 --- a/Documentation/ABI/testing/sysfs-platform-chipidea-usb-otg +++ b/Documentation/ABI/testing/sysfs-platform-chipidea-usb-otg @@ -1,6 +1,6 @@ What: /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_req Date: Feb 2014 -Contact: Li Jun +Contact: Li Jun Description: Can be set and read. Set a_bus_req(A-device bus request) input to be 1 if @@ -17,7 +17,7 @@ Description: What: /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_drop Date: Feb 2014 -Contact: Li Jun +Contact: Li Jun Description: Can be set and read The a_bus_drop(A-device bus drop) input is 1 when the @@ -32,7 +32,7 @@ Description: What: /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req Date: Feb 2014 -Contact: Li Jun +Contact: Li Jun Description: Can be set and read. The b_bus_req(B-device bus request) input is 1 during the time @@ -47,7 +47,7 @@ Description: What: /sys/bus/platform/devices/ci_hdrc.0/inputs/a_clr_err Date: Feb 2014 -Contact: Li Jun +Contact: Li Jun Description: Only can be set. 
The a_clr_err(A-device Vbus error clear) input is used to clear diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index ce3e05e41724..d09471aa7443 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1356,8 +1356,8 @@ PAGE_SIZE multiple when read back. thp_fault_alloc Number of transparent hugepages which were allocated to satisfy - a page fault, including COW faults. This counter is not present - when CONFIG_TRANSPARENT_HUGEPAGE is not set. + a page fault. This counter is not present when CONFIG_TRANSPARENT_HUGEPAGE + is not set. thp_collapse_alloc Number of transparent hugepages which were allocated to allow diff --git a/Documentation/admin-guide/device-mapper/index.rst b/Documentation/admin-guide/device-mapper/index.rst index ec62fcc8eece..6cf8adc86fa8 100644 --- a/Documentation/admin-guide/device-mapper/index.rst +++ b/Documentation/admin-guide/device-mapper/index.rst @@ -11,6 +11,7 @@ Device Mapper dm-clone dm-crypt dm-dust + dm-ebs dm-flakey dm-init dm-integrity diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 6a233e42be08..b2acd0d395ca 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -305,8 +305,7 @@ monitor how successfully the system is providing huge pages for use. thp_fault_alloc is incremented every time a huge page is successfully - allocated to handle a page fault. This applies to both the - first time a page is faulted and for COW faults. + allocated to handle a page fault. thp_collapse_alloc is incremented by khugepaged when it has found diff --git a/Documentation/arm64/sve.rst b/Documentation/arm64/sve.rst index 5689c74c8082..bfd55f468258 100644 --- a/Documentation/arm64/sve.rst +++ b/Documentation/arm64/sve.rst @@ -186,7 +186,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg) flags: - PR_SVE_SET_VL_INHERIT + PR_SVE_VL_INHERIT Inherit the current vector length across execve(). Otherwise, the vector length is reset to the system default at execve(). (See @@ -247,7 +247,7 @@ prctl(PR_SVE_GET_VL) The following flag may be OR-ed into the result: - PR_SVE_SET_VL_INHERIT + PR_SVE_VL_INHERIT Vector length will be inherited across execve(). @@ -393,7 +393,7 @@ The regset data starts with struct user_sve_header, containing: * At every execve() call, the new vector length of the new process is set to the system default vector length, unless - * PR_SVE_SET_VL_INHERIT (or equivalently SVE_PT_VL_INHERIT) is set for the + * PR_SVE_VL_INHERIT (or equivalently SVE_PT_VL_INHERIT) is set for the calling thread, or * a deferred vector length change is pending, established via the diff --git a/Documentation/bpf/prog_cgroup_sockopt.rst b/Documentation/bpf/prog_cgroup_sockopt.rst index c47d974629ae..172f957204bf 100644 --- a/Documentation/bpf/prog_cgroup_sockopt.rst +++ b/Documentation/bpf/prog_cgroup_sockopt.rst @@ -86,6 +86,20 @@ then the next program in the chain (A) will see those changes, *not* the original input ``setsockopt`` arguments. The potentially modified values will be then passed down to the kernel. +Large optval +============ +When the ``optval`` is greater than the ``PAGE_SIZE``, the BPF program +can access only the first ``PAGE_SIZE`` of that data. So it has to options: + +* Set ``optlen`` to zero, which indicates that the kernel should + use the original buffer from the userspace. Any modifications + done by the BPF program to the ``optval`` are ignored. 
+* Set ``optlen`` to the value less than ``PAGE_SIZE``, which + indicates that the kernel should use BPF's trimmed ``optval``. + +When the BPF program returns with the ``optlen`` greater than +``PAGE_SIZE``, the userspace will receive ``EFAULT`` errno. + Example ======= diff --git a/Documentation/core-api/pin_user_pages.rst b/Documentation/core-api/pin_user_pages.rst index 6068266dd303..7ca8c7bac650 100644 --- a/Documentation/core-api/pin_user_pages.rst +++ b/Documentation/core-api/pin_user_pages.rst @@ -33,7 +33,7 @@ all combinations of get*(), pin*(), FOLL_LONGTERM, and more. Also, the pin_user_pages*() APIs are clearly distinct from the get_user_pages*() APIs, so that's a natural dividing line, and a good point to make separate wrapper calls. In other words, use pin_user_pages*() for DMA-pinned pages, and -get_user_pages*() for other cases. There are four cases described later on in +get_user_pages*() for other cases. There are five cases described later on in this document, to further clarify that concept. FOLL_PIN and FOLL_GET are mutually exclusive for a given gup call. However, diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst index ce4bbd918648..b38379f06194 100644 --- a/Documentation/dev-tools/kcsan.rst +++ b/Documentation/dev-tools/kcsan.rst @@ -114,12 +114,6 @@ the below options are available: To dynamically limit for which functions to generate reports, see the `DebugFS interface`_ blacklist/whitelist feature. - For ``__always_inline`` functions, replace ``__always_inline`` with - ``__no_kcsan_or_inline`` (which implies ``__always_inline``):: - - static __no_kcsan_or_inline void foo(void) { - ... - * To disable data race detection for a particular compilation unit, add to the ``Makefile``:: diff --git a/Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml b/Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml index 9147df29022a..38efb50081e3 100644 --- a/Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml +++ b/Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml @@ -34,12 +34,15 @@ properties: maxItems: 1 clocks: - maxItems: 1 + minItems: 1 + maxItems: 2 + items: + - description: controller register bus clock + - description: baud rate generator and delay control clock clock-names: - description: input clock for the baud rate generator - items: - - const: core + minItems: 1 + maxItems: 2 if: properties: @@ -51,17 +54,22 @@ if: then: properties: clocks: - contains: - items: - - description: controller register bus clock - - description: baud rate generator and delay control clock + minItems: 2 clock-names: - minItems: 2 items: - const: core - const: pclk +else: + properties: + clocks: + maxItems: 1 + + clock-names: + items: + - const: core + required: - compatible - reg diff --git a/Documentation/driver-api/media/v4l2-subdev.rst b/Documentation/driver-api/media/v4l2-subdev.rst index 6e71f67455bb..bc7e1fc40a9d 100644 --- a/Documentation/driver-api/media/v4l2-subdev.rst +++ b/Documentation/driver-api/media/v4l2-subdev.rst @@ -451,7 +451,7 @@ The bridge driver also has some helper functions it can use: "module_foo", "chipid", 0x36, NULL); This loads the given module (can be ``NULL`` if no module needs to be loaded) -and calls :c:func:`i2c_new_device` with the given ``i2c_adapter`` and +and calls :c:func:`i2c_new_client_device` with the given ``i2c_adapter`` and chip/address arguments. If all goes well, then it registers the subdev with the v4l2_device. 
diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt index 8e2670781c9b..8fdb78f3c6c9 100644 --- a/Documentation/filesystems/dax.txt +++ b/Documentation/filesystems/dax.txt @@ -25,7 +25,7 @@ size when creating the filesystem. Currently 3 filesystems support DAX: ext2, ext4 and xfs. Enabling DAX on them is different. -Enabling DAX on ext4 and ext2 +Enabling DAX on ext2 ----------------------------- When mounting the filesystem, use the "-o dax" option on the command line or @@ -33,8 +33,8 @@ add 'dax' to the options in /etc/fstab. This works to enable DAX on all files within the filesystem. It is equivalent to the '-o dax=always' behavior below. -Enabling DAX on xfs -------------------- +Enabling DAX on xfs and ext4 +---------------------------- Summary ------- diff --git a/Documentation/filesystems/ext4/verity.rst b/Documentation/filesystems/ext4/verity.rst index 3e4c0ee0e068..e99ff3fd09f7 100644 --- a/Documentation/filesystems/ext4/verity.rst +++ b/Documentation/filesystems/ext4/verity.rst @@ -39,3 +39,6 @@ is encrypted as well as the data itself. Verity files cannot have blocks allocated past the end of the verity metadata. + +Verity and DAX are not compatible and attempts to set both of these flags +on a file will fail. diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst index 4cc74325bf91..17112352f605 100644 --- a/Documentation/gpu/amdgpu.rst +++ b/Documentation/gpu/amdgpu.rst @@ -197,11 +197,14 @@ pp_power_profile_mode .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c :doc: pp_power_profile_mode -busy_percent -~~~~~~~~~~~~ +*_busy_percent +~~~~~~~~~~~~~~ .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c - :doc: busy_percent + :doc: gpu_busy_percent + +.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c + :doc: mem_busy_percent GPU Product Information ======================= diff --git a/Documentation/i2c/smbus-protocol.rst b/Documentation/i2c/smbus-protocol.rst index c2e29633071e..64689d19dd51 100644 --- a/Documentation/i2c/smbus-protocol.rst +++ b/Documentation/i2c/smbus-protocol.rst @@ -57,7 +57,7 @@ SMBus Quick Command This sends a single bit to the device, at the place of the Rd/Wr bit:: - A Addr Rd/Wr [A] P + S Addr Rd/Wr [A] P Functionality flag: I2C_FUNC_SMBUS_QUICK diff --git a/Documentation/networking/ieee802154.rst b/Documentation/networking/ieee802154.rst index 36ca823a1122..6f4bf8447a21 100644 --- a/Documentation/networking/ieee802154.rst +++ b/Documentation/networking/ieee802154.rst @@ -30,8 +30,8 @@ Socket API The address family, socket addresses etc. are defined in the include/net/af_ieee802154.h header or in the special header -in the userspace package (see either http://wpan.cakelab.org/ or the -git tree at https://github.com/linux-wpan/wpan-tools). +in the userspace package (see either https://linux-wpan.org/wpan-tools.html +or the git tree at https://github.com/linux-wpan/wpan-tools). 6LoWPAN Linux implementation ============================ diff --git a/Documentation/powerpc/papr_hcalls.rst b/Documentation/powerpc/papr_hcalls.rst index 3493631a60f8..48fcf1255a33 100644 --- a/Documentation/powerpc/papr_hcalls.rst +++ b/Documentation/powerpc/papr_hcalls.rst @@ -220,13 +220,51 @@ from the LPAR memory. **H_SCM_HEALTH** | Input: drcIndex -| Out: *health-bitmap, health-bit-valid-bitmap* +| Out: *health-bitmap (r4), health-bit-valid-bitmap (r5)* | Return Value: *H_Success, H_Parameter, H_Hardware* Given a DRC Index return the info on predictive failure and overall health of -the NVDIMM. 
The asserted bits in the health-bitmap indicate a single predictive -failure and health-bit-valid-bitmap indicate which bits in health-bitmap are -valid. +the PMEM device. The asserted bits in the health-bitmap indicate one or more states +(described in table below) of the PMEM device and health-bit-valid-bitmap indicate +which bits in health-bitmap are valid. The bits are reported in +reverse bit ordering for example a value of 0xC400000000000000 +indicates bits 0, 1, and 5 are valid. + +Health Bitmap Flags: + ++------+-----------------------------------------------------------------------+ +| Bit | Definition | ++======+=======================================================================+ +| 00 | PMEM device is unable to persist memory contents. | +| | If the system is powered down, nothing will be saved. | ++------+-----------------------------------------------------------------------+ +| 01 | PMEM device failed to persist memory contents. Either contents were | +| | not saved successfully on power down or were not restored properly on | +| | power up. | ++------+-----------------------------------------------------------------------+ +| 02 | PMEM device contents are persisted from previous IPL. The data from | +| | the last boot were successfully restored. | ++------+-----------------------------------------------------------------------+ +| 03 | PMEM device contents are not persisted from previous IPL. There was no| +| | data to restore from the last boot. | ++------+-----------------------------------------------------------------------+ +| 04 | PMEM device memory life remaining is critically low | ++------+-----------------------------------------------------------------------+ +| 05 | PMEM device will be garded off next IPL due to failure | ++------+-----------------------------------------------------------------------+ +| 06 | PMEM device contents cannot persist due to current platform health | +| | status. A hardware failure may prevent data from being saved or | +| | restored. | ++------+-----------------------------------------------------------------------+ +| 07 | PMEM device is unable to persist memory contents in certain conditions| ++------+-----------------------------------------------------------------------+ +| 08 | PMEM device is encrypted | ++------+-----------------------------------------------------------------------+ +| 09 | PMEM device has successfully completed a requested erase or secure | +| | erase procedure. | ++------+-----------------------------------------------------------------------+ +|10:63 | Reserved / Unused | ++------+-----------------------------------------------------------------------+ **H_SCM_PERFORMANCE_STATS** diff --git a/Documentation/sh/index.rst b/Documentation/sh/index.rst index bc8db7ba894a..0bd405acf68f 100644 --- a/Documentation/sh/index.rst +++ b/Documentation/sh/index.rst @@ -16,18 +16,6 @@ Store Queue API .. kernel-doc:: arch/sh/kernel/cpu/sh4/sq.c :export: -SH-5 ----- - -TLB Interfaces -~~~~~~~~~~~~~~ - -.. kernel-doc:: arch/sh/mm/tlb-sh5.c - :internal: - -.. 
kernel-doc:: arch/sh/include/asm/tlb_64.h - :internal: - Machine Specific Interfaces =========================== diff --git a/Documentation/userspace-api/media/conf_nitpick.py b/Documentation/userspace-api/media/conf_nitpick.py index d0c50d75f518..0a8e236d07ab 100644 --- a/Documentation/userspace-api/media/conf_nitpick.py +++ b/Documentation/userspace-api/media/conf_nitpick.py @@ -27,7 +27,7 @@ nitpick_ignore = [ ("c:func", "copy_to_user"), ("c:func", "determine_valid_ioctls"), ("c:func", "ERR_PTR"), - ("c:func", "i2c_new_device"), + ("c:func", "i2c_new_client_device"), ("c:func", "ioctl"), ("c:func", "IS_ERR"), ("c:func", "KERNEL_VERSION"), diff --git a/MAINTAINERS b/MAINTAINERS index 68f21d46614c..496fd4eafb68 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8333,7 +8333,7 @@ M: Alexander Aring M: Stefan Schmidt L: linux-wpan@vger.kernel.org S: Maintained -W: http://wpan.cakelab.org/ +W: https://linux-wpan.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git F: Documentation/networking/ieee802154.rst @@ -10808,7 +10808,7 @@ F: Documentation/devicetree/bindings/dma/mtk-* F: drivers/dma/mediatek/ MEDIATEK ETHERNET DRIVER -M: Felix Fietkau +M: Felix Fietkau M: John Crispin M: Sean Wang M: Mark Lee @@ -11369,14 +11369,6 @@ L: dmaengine@vger.kernel.org S: Supported F: drivers/dma/at_xdmac.c -MICROSEMI ETHERNET SWITCH DRIVER -M: Alexandre Belloni -M: Microchip Linux Driver Support -L: netdev@vger.kernel.org -S: Supported -F: drivers/net/ethernet/mscc/ -F: include/soc/mscc/ocelot* - MICROSEMI MIPS SOCS M: Alexandre Belloni M: Microchip Linux Driver Support @@ -12335,6 +12327,18 @@ M: Peter Zijlstra S: Supported F: tools/objtool/ +OCELOT ETHERNET SWITCH DRIVER +M: Microchip Linux Driver Support +M: Vladimir Oltean +M: Claudiu Manoil +M: Alexandre Belloni +L: netdev@vger.kernel.org +S: Supported +F: drivers/net/dsa/ocelot/* +F: drivers/net/ethernet/mscc/ +F: include/soc/mscc/ocelot* +F: net/dsa/tag_ocelot.c + OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER M: Frederic Barrat M: Andrew Donnellan @@ -12691,13 +12695,13 @@ F: arch/mips/boot/dts/ralink/omega2p.dts OP-TEE DRIVER M: Jens Wiklander -L: tee-dev@lists.linaro.org +L: op-tee@lists.trustedfirmware.org S: Maintained F: drivers/tee/optee/ OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER M: Sumit Garg -L: tee-dev@lists.linaro.org +L: op-tee@lists.trustedfirmware.org S: Maintained F: drivers/char/hw_random/optee-rng.c @@ -14192,6 +14196,15 @@ L: dmaengine@vger.kernel.org S: Supported F: drivers/dma/qcom/hidma* +QUALCOMM I2C CCI DRIVER +M: Loic Poulain +M: Robert Foss +L: linux-i2c@vger.kernel.org +L: linux-arm-msm@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt +F: drivers/i2c/busses/i2c-qcom-cci.c + QUALCOMM IOMMU M: Rob Clark L: iommu@lists.linux-foundation.org @@ -14534,7 +14547,7 @@ F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.txt F: drivers/i2c/busses/i2c-emev2.c RENESAS ETHERNET DRIVERS -R: Sergei Shtylyov +R: Sergei Shtylyov L: netdev@vger.kernel.org L: linux-renesas-soc@vger.kernel.org F: Documentation/devicetree/bindings/net/renesas,*.txt @@ -16045,8 +16058,10 @@ SPARSE CHECKER M: "Luc Van Oostenryck" L: linux-sparse@vger.kernel.org S: Maintained -W: https://sparse.wiki.kernel.org/ +W: https://sparse.docs.kernel.org/ T: git git://git.kernel.org/pub/scm/devel/sparse/sparse.git +Q: https://patchwork.kernel.org/project/linux-sparse/list/ +B: 
https://bugzilla.kernel.org/enter_bug.cgi?component=Sparse&product=Tools F: include/linux/compiler.h SPEAR CLOCK FRAMEWORK SUPPORT @@ -16759,7 +16774,7 @@ F: include/media/i2c/tw9910.h TEE SUBSYSTEM M: Jens Wiklander -L: tee-dev@lists.linaro.org +L: op-tee@lists.trustedfirmware.org S: Maintained F: Documentation/tee.txt F: drivers/tee/ @@ -18254,14 +18269,6 @@ S: Maintained F: drivers/input/serio/userio.c F: include/uapi/linux/userio.h -VITESSE FELIX ETHERNET SWITCH DRIVER -M: Vladimir Oltean -M: Claudiu Manoil -L: netdev@vger.kernel.org -S: Maintained -F: drivers/net/dsa/ocelot/* -F: net/dsa/tag_ocelot.c - VIVID VIRTUAL VIDEO DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org diff --git a/Makefile b/Makefile index ae5d8220f431..a60c98519c37 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 8 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc3 NAME = Kleptomaniac Octopus # *DOCUMENTATION* @@ -828,7 +828,7 @@ endif ifdef CONFIG_DEBUG_INFO_COMPRESSED DEBUG_CFLAGS += -gz=zlib -KBUILD_AFLAGS += -Wa,--compress-debug-sections=zlib +KBUILD_AFLAGS += -gz=zlib KBUILD_LDFLAGS += --compress-debug-sections=zlib endif @@ -1336,16 +1336,6 @@ dt_binding_check: scripts_dtc # --------------------------------------------------------------------------- # Modules -# install modules.builtin regardless of CONFIG_MODULES -PHONY += _builtin_inst_ -_builtin_inst_: - @mkdir -p $(MODLIB)/ - @cp -f modules.builtin $(MODLIB)/ - @cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/ - -PHONY += install -install: _builtin_inst_ - ifdef CONFIG_MODULES # By default, build modules as well @@ -1389,7 +1379,7 @@ PHONY += modules_install modules_install: _modinst_ _modinst_post PHONY += _modinst_ -_modinst_: _builtin_inst_ +_modinst_: @rm -rf $(MODLIB)/kernel @rm -f $(MODLIB)/source @mkdir -p $(MODLIB)/kernel @@ -1399,6 +1389,8 @@ _modinst_: _builtin_inst_ ln -s $(CURDIR) $(MODLIB)/build ; \ fi @sed 's:^:kernel/:' modules.order > $(MODLIB)/modules.order + @cp -f modules.builtin $(MODLIB)/ + @cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst # This depmod is only for convenience to give the initial diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi index 05e7b5d4a95b..04f0b1227efe 100644 --- a/arch/arm/boot/dts/am335x-baltos.dtsi +++ b/arch/arm/boot/dts/am335x-baltos.dtsi @@ -369,7 +369,7 @@ &mmc1 { &mmc2 { status = "okay"; vmmc-supply = <&wl12xx_vmmc>; - ti,non-removable; + non-removable; bus-width = <4>; cap-power-off-card; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/am335x-boneblack-common.dtsi b/arch/arm/boot/dts/am335x-boneblack-common.dtsi index 91f93bc89716..dd932220a8bf 100644 --- a/arch/arm/boot/dts/am335x-boneblack-common.dtsi +++ b/arch/arm/boot/dts/am335x-boneblack-common.dtsi @@ -22,6 +22,7 @@ &mmc2 { pinctrl-0 = <&emmc_pins>; bus-width = <8>; status = "okay"; + non-removable; }; &am33xx_pinmux { diff --git a/arch/arm/boot/dts/am335x-boneblack-wireless.dts b/arch/arm/boot/dts/am335x-boneblack-wireless.dts index 3124d94c0b3c..e07dd7979586 100644 --- a/arch/arm/boot/dts/am335x-boneblack-wireless.dts +++ b/arch/arm/boot/dts/am335x-boneblack-wireless.dts @@ -75,7 +75,6 @@ &mmc3 { bus-width = <4>; non-removable; cap-power-off-card; - ti,needs-special-hs-handling; keep-power-in-suspend; pinctrl-names = "default"; pinctrl-0 = <&mmc3_pins &wl18xx_pins>; diff --git a/arch/arm/boot/dts/am335x-boneblue.dts b/arch/arm/boot/dts/am335x-boneblue.dts index 5811fb8d4fdf..83f9452c9cd3 100644 --- 
a/arch/arm/boot/dts/am335x-boneblue.dts +++ b/arch/arm/boot/dts/am335x-boneblue.dts @@ -367,7 +367,6 @@ &mmc3 { bus-width = <4>; non-removable; cap-power-off-card; - ti,needs-special-hs-handling; keep-power-in-suspend; pinctrl-names = "default"; pinctrl-0 = <&mmc3_pins &wl18xx_pins>; diff --git a/arch/arm/boot/dts/am335x-bonegreen-wireless.dts b/arch/arm/boot/dts/am335x-bonegreen-wireless.dts index 4092cd193b8a..609c8db687ec 100644 --- a/arch/arm/boot/dts/am335x-bonegreen-wireless.dts +++ b/arch/arm/boot/dts/am335x-bonegreen-wireless.dts @@ -75,7 +75,6 @@ &mmc3 { bus-width = <4>; non-removable; cap-power-off-card; - ti,needs-special-hs-handling; keep-power-in-suspend; pinctrl-names = "default"; pinctrl-0 = <&mmc3_pins &wl18xx_pins>; diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index 68252dab32c3..a4fc6b168a85 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts @@ -743,8 +743,7 @@ xbar-event-map element */ bus-width = <4>; pinctrl-names = "default"; pinctrl-0 = <&mmc3_pins &wlan_pins>; - ti,non-removable; - ti,needs-special-hs-handling; + non-removable; cap-power-off-card; keep-power-in-suspend; diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index 32f515a295ee..78b6e1f594c9 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts @@ -655,7 +655,7 @@ &gpio0 { &mmc2 { status = "okay"; vmmc-supply = <&wl12xx_vmmc>; - ti,non-removable; + non-removable; bus-width = <4>; cap-power-off-card; keep-power-in-suspend; diff --git a/arch/arm/boot/dts/am335x-lxm.dts b/arch/arm/boot/dts/am335x-lxm.dts index fef582852820..dbedf729205c 100644 --- a/arch/arm/boot/dts/am335x-lxm.dts +++ b/arch/arm/boot/dts/am335x-lxm.dts @@ -339,7 +339,7 @@ &mmc2 { pinctrl-0 = <&emmc_pins>; vmmc-supply = <&vmmcsd_fixed>; bus-width = <8>; - ti,non-removable; + non-removable; status = "okay"; }; diff --git a/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi b/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi index 6495a125c01f..4e90f9c23d2e 100644 --- a/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi +++ b/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi @@ -159,7 +159,7 @@ &mmc2 { vmmc-supply = <&vmmcsd_fixed>; bus-width = <8>; pinctrl-0 = <&mmc1_pins_default>; - ti,non-removable; + non-removable; status = "okay"; }; diff --git a/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts b/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts index 244df9c5a537..f03e72cada41 100644 --- a/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts +++ b/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts @@ -451,7 +451,7 @@ &mmc3 { vmmc-supply = <&vmmcsd_fixed>; bus-width = <8>; pinctrl-0 = <&mmc2_pins_default>; - ti,non-removable; + non-removable; status = "okay"; }; diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts index 6d7608d9377b..f9a027b47962 100644 --- a/arch/arm/boot/dts/am335x-pepper.dts +++ b/arch/arm/boot/dts/am335x-pepper.dts @@ -341,7 +341,7 @@ &mmc2 { pinctrl-0 = <&emmc_pins>; vmmc-supply = <&ldo3_reg>; bus-width = <8>; - ti,non-removable; + non-removable; }; &mmc3 { @@ -351,7 +351,7 @@ &mmc3 { pinctrl-0 = <&wireless_pins>; vmmmc-supply = <&v3v3c_reg>; bus-width = <4>; - ti,non-removable; + non-removable; dmas = <&edma_xbar 12 0 1 &edma_xbar 13 0 2>; dma-names = "tx", "rx"; diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi index 3d0672b53d77..7e46b4c02709 100644 --- a/arch/arm/boot/dts/am335x-phycore-som.dtsi +++ 
b/arch/arm/boot/dts/am335x-phycore-som.dtsi @@ -69,7 +69,7 @@ &mmc2 { pinctrl-0 = <&emmc_pins>; vmmc-supply = <&vmmc_reg>; bus-width = <8>; - ti,non-removable; + non-removable; status = "disabled"; }; diff --git a/arch/arm/boot/dts/am335x-pocketbeagle.dts b/arch/arm/boot/dts/am335x-pocketbeagle.dts index 4da719098028..f0b222201b86 100644 --- a/arch/arm/boot/dts/am335x-pocketbeagle.dts +++ b/arch/arm/boot/dts/am335x-pocketbeagle.dts @@ -88,7 +88,6 @@ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0) - AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT, MUX_MODE4) /* (B12) mcasp0_aclkr.mmc0_sdwp */ >; }; diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi index 7ff11d6bf0f2..a9cbefc80c0c 100644 --- a/arch/arm/boot/dts/am33xx-l4.dtsi +++ b/arch/arm/boot/dts/am33xx-l4.dtsi @@ -1335,10 +1335,8 @@ SYSC_OMAP2_SOFTRESET | ranges = <0x0 0x60000 0x1000>; mmc1: mmc@0 { - compatible = "ti,omap4-hsmmc"; - ti,dual-volt; + compatible = "ti,am335-sdhci"; ti,needs-special-reset; - ti,needs-special-hs-handling; dmas = <&edma_xbar 24 0 0 &edma_xbar 25 0 0>; dma-names = "tx", "rx"; @@ -1816,7 +1814,7 @@ SYSC_OMAP2_SOFTRESET | ranges = <0x0 0xd8000 0x1000>; mmc2: mmc@0 { - compatible = "ti,omap4-hsmmc"; + compatible = "ti,am335-sdhci"; ti,needs-special-reset; dmas = <&edma 2 0 &edma 3 0>; diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 3b177c9c4412..5fdce106edbb 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -322,10 +322,11 @@ SYSC_OMAP2_SOFTRESET | ranges = <0x0 0x47810000 0x1000>; mmc3: mmc@0 { - compatible = "ti,omap4-hsmmc"; + compatible = "ti,am335-sdhci"; ti,needs-special-reset; interrupts = <29>; reg = <0x0 0x1000>; + status = "disabled"; }; }; @@ -335,7 +336,7 @@ usb: target-module@47400000 { <0x47400010 0x4>; reg-names = "rev", "sysc"; ti,sysc-mask = <(SYSC_OMAP4_FREEEMU | - SYSC_OMAP2_SOFTRESET)>; + SYSC_OMAP4_SOFTRESET)>; ti,sysc-midle = , , ; @@ -347,7 +348,7 @@ usb: target-module@47400000 { clock-names = "fck"; #address-cells = <1>; #size-cells = <1>; - ranges = <0x0 0x47400000 0x5000>; + ranges = <0x0 0x47400000 0x8000>; usb0_phy: usb-phy@1300 { compatible = "ti,am335x-usb-phy"; diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index b4861f70f178..51ad9e881a62 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -316,10 +316,11 @@ SYSC_OMAP2_SOFTRESET | ranges = <0x0 0x47810000 0x1000>; mmc3: mmc@0 { - compatible = "ti,omap4-hsmmc"; + compatible = "ti,am437-sdhci"; ti,needs-special-reset; interrupts = ; reg = <0x0 0x1000>; + status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/am437x-cm-t43.dts b/arch/arm/boot/dts/am437x-cm-t43.dts index 063113a5da2d..a6b4fca8626a 100644 --- a/arch/arm/boot/dts/am437x-cm-t43.dts +++ b/arch/arm/boot/dts/am437x-cm-t43.dts @@ -291,7 +291,7 @@ &mmc2 { pinctrl-0 = <&emmc_pins>; vmmc-supply = <&vmmc_3v3>; bus-width = <8>; - ti,non-removable; + non-removable; }; &spi0 { diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts index d692e3b2812a..77378630e5ec 100644 --- a/arch/arm/boot/dts/am437x-gp-evm.dts +++ b/arch/arm/boot/dts/am437x-gp-evm.dts @@ -91,22 +91,6 @@ lcd0: display { backlight = <&lcd_bl>; - panel-timing { - clock-frequency = <33000000>; - hactive = <800>; - vactive 
= <480>; - hfront-porch = <210>; - hback-porch = <16>; - hsync-len = <30>; - vback-porch = <10>; - vfront-porch = <22>; - vsync-len = <13>; - hsync-active = <0>; - vsync-active = <0>; - de-active = <1>; - pixelclk-active = <1>; - }; - port { lcd_in: endpoint { remote-endpoint = <&dpi_out>; @@ -869,7 +853,7 @@ &mmc2 { pinctrl-names = "default", "sleep"; pinctrl-0 = <&emmc_pins_default>; pinctrl-1 = <&emmc_pins_sleep>; - ti,non-removable; + non-removable; }; &mmc3 { @@ -886,7 +870,7 @@ xbar-event-map element */ pinctrl-1 = <&mmc3_pins_sleep>; cap-power-off-card; keep-power-in-suspend; - ti,non-removable; + non-removable; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi index 0d0f9fe4a882..7d19395e30c8 100644 --- a/arch/arm/boot/dts/am437x-l4.dtsi +++ b/arch/arm/boot/dts/am437x-l4.dtsi @@ -1083,9 +1083,8 @@ SYSC_OMAP2_SOFTRESET | ranges = <0x0 0x60000 0x1000>; mmc1: mmc@0 { - compatible = "ti,omap4-hsmmc"; + compatible = "ti,am437-sdhci"; reg = <0x0 0x1000>; - ti,dual-volt; ti,needs-special-reset; dmas = <&edma 24 0>, <&edma 25 0>; @@ -1598,7 +1597,7 @@ SYSC_OMAP2_SOFTRESET | ranges = <0x0 0xd8000 0x1000>; mmc2: mmc@0 { - compatible = "ti,omap4-hsmmc"; + compatible = "ti,am437-sdhci"; reg = <0x0 0x1000>; ti,needs-special-reset; dmas = <&edma 2 0>, diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts index 4d5a7ca2e25d..08eabf0f3cbd 100644 --- a/arch/arm/boot/dts/am437x-sk-evm.dts +++ b/arch/arm/boot/dts/am437x-sk-evm.dts @@ -134,22 +134,6 @@ lcd0: display { enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>; - panel-timing { - clock-frequency = <9000000>; - hactive = <480>; - vactive = <272>; - hfront-porch = <2>; - hback-porch = <2>; - hsync-len = <41>; - vfront-porch = <2>; - vback-porch = <2>; - vsync-len = <10>; - hsync-active = <0>; - vsync-active = <0>; - de-active = <1>; - pixelclk-active = <1>; - }; - port { lcd_in: endpoint { remote-endpoint = <&dpi_out>; @@ -719,7 +703,7 @@ &mmc3 { pinctrl-1 = <&mmc3_pins_sleep>; cap-power-off-card; keep-power-in-suspend; - ti,non-removable; + non-removable; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index 27259fd6f741..7d4e0dffde7a 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ b/arch/arm/boot/dts/am43x-epos-evm.dts @@ -47,22 +47,6 @@ lcd0: display { backlight = <&lcd_bl>; - panel-timing { - clock-frequency = <33000000>; - hactive = <800>; - vactive = <480>; - hfront-porch = <210>; - hback-porch = <16>; - hsync-len = <30>; - vback-porch = <10>; - vfront-porch = <22>; - vsync-len = <13>; - hsync-active = <0>; - vsync-active = <0>; - de-active = <1>; - pixelclk-active = <1>; - }; - port { lcd_in: endpoint { remote-endpoint = <&dpi_out>; diff --git a/arch/arm/boot/dts/am5729-beagleboneai.dts b/arch/arm/boot/dts/am5729-beagleboneai.dts index 9877d7709d41..4c51c6b05e64 100644 --- a/arch/arm/boot/dts/am5729-beagleboneai.dts +++ b/arch/arm/boot/dts/am5729-beagleboneai.dts @@ -505,7 +505,7 @@ &mac { &cpsw_emac0 { phy-handle = <&phy0>; - phy-mode = "rgmii"; + phy-mode = "rgmii-rxid"; }; &ocp { diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index da6d70f09ef1..3175266ede64 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -200,7 +200,7 @@ uart1: serial@400 { status = "disabled"; }; - dma@20000 { + dma: dma@20000 { compatible = "arm,pl330", "arm,primecell"; reg = <0x20000 0x1000>; interrupts = , @@ -215,6 
+215,8 @@ dma@20000 { clocks = <&iprocslow>; clock-names = "apb_pclk"; #dma-cells = <1>; + dma-coherent; + status = "disabled"; }; sdio: sdhci@21000 { @@ -257,10 +259,10 @@ amac2: ethernet@24000 { status = "disabled"; }; - mailbox: mailbox@25000 { + mailbox: mailbox@25c00 { compatible = "brcm,iproc-fa2-mbox"; - reg = <0x25000 0x445>; - interrupts = ; + reg = <0x25c00 0x400>; + interrupts = ; #mbox-cells = <1>; brcm,rx-status-len = <32>; brcm,use-bcm-hdr; diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts index 334325390aed..29bbecd36f65 100644 --- a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts +++ b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts @@ -17,6 +17,7 @@ chosen { }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000 0x88000000 0x18000000>; }; diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts index 8c388eb8a08f..7be4c4e628e0 100644 --- a/arch/arm/boot/dts/bcm958522er.dts +++ b/arch/arm/boot/dts/bcm958522er.dts @@ -58,6 +58,10 @@ gpio-restart { /* USB 3 support needed to be complete */ +&dma { + status = "okay"; +}; + &amac0 { status = "okay"; }; diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts index c339771bb22e..e58ed7e95346 100644 --- a/arch/arm/boot/dts/bcm958525er.dts +++ b/arch/arm/boot/dts/bcm958525er.dts @@ -58,6 +58,10 @@ gpio-restart { /* USB 3 support needed to be complete */ +&dma { + status = "okay"; +}; + &amac0 { status = "okay"; }; diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts index 1c72ec8288de..716da62f5788 100644 --- a/arch/arm/boot/dts/bcm958525xmc.dts +++ b/arch/arm/boot/dts/bcm958525xmc.dts @@ -58,6 +58,10 @@ gpio-restart { /* XHCI support needed to be complete */ +&dma { + status = "okay"; +}; + &amac0 { status = "okay"; }; diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts index 96a021cebd97..a49c2fd21f4a 100644 --- a/arch/arm/boot/dts/bcm958622hr.dts +++ b/arch/arm/boot/dts/bcm958622hr.dts @@ -58,6 +58,10 @@ gpio-restart { /* USB 3 and SLIC support needed to be complete */ +&dma { + status = "okay"; +}; + &amac0 { status = "okay"; }; diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts index b2c7f21d471e..dd6dff6452b8 100644 --- a/arch/arm/boot/dts/bcm958623hr.dts +++ b/arch/arm/boot/dts/bcm958623hr.dts @@ -58,6 +58,10 @@ gpio-restart { /* USB 3 and SLIC support needed to be complete */ +&dma { + status = "okay"; +}; + &amac0 { status = "okay"; }; diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts index 536fb24f38bb..a71371b4065e 100644 --- a/arch/arm/boot/dts/bcm958625hr.dts +++ b/arch/arm/boot/dts/bcm958625hr.dts @@ -69,6 +69,10 @@ &i2c0 { status = "okay"; }; +&dma { + status = "okay"; +}; + &amac0 { status = "okay"; }; diff --git a/arch/arm/boot/dts/bcm958625k.dts b/arch/arm/boot/dts/bcm958625k.dts index 3fcca12d83c2..7b84b54436ed 100644 --- a/arch/arm/boot/dts/bcm958625k.dts +++ b/arch/arm/boot/dts/bcm958625k.dts @@ -48,6 +48,10 @@ memory@60000000 { }; }; +&dma { + status = "okay"; +}; + &amac0 { status = "okay"; }; diff --git a/arch/arm/boot/dts/dra7-evm-common.dtsi b/arch/arm/boot/dts/dra7-evm-common.dtsi index f89a64cbcd53..2cf6a529d4ad 100644 --- a/arch/arm/boot/dts/dra7-evm-common.dtsi +++ b/arch/arm/boot/dts/dra7-evm-common.dtsi @@ -245,26 +245,6 @@ &mcasp3 { rx-num-evt = <32>; }; -&mailbox5 { - status = "okay"; - mbox_ipu1_ipc3x: mbox_ipu1_ipc3x { - status = "okay"; - }; - 
mbox_dsp1_ipc3x: mbox_dsp1_ipc3x { - status = "okay"; - }; -}; - -&mailbox6 { - status = "okay"; - mbox_ipu2_ipc3x: mbox_ipu2_ipc3x { - status = "okay"; - }; - mbox_dsp2_ipc3x: mbox_dsp2_ipc3x { - status = "okay"; - }; -}; - &pcie1_rc { status = "okay"; }; diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi index 62ca89551219..0c6f26605506 100644 --- a/arch/arm/boot/dts/dra7-l4.dtsi +++ b/arch/arm/boot/dts/dra7-l4.dtsi @@ -1207,9 +1207,8 @@ target-module@36000 { /* 0x48036000, ap 9 4e.0 */ , ; /* Domains (P, C): l4per_pwrdm, l4per_clkdm */ - clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 0>, - <&timer_sys_clk_div>; - clock-names = "fck", "timer_sys_ck"; + clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 0>; + clock-names = "fck"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x36000 0x1000>; @@ -3352,8 +3351,8 @@ target-module@20000 { /* 0x48820000, ap 5 08.0 */ , ; /* Domains (P, C): ipu_pwrdm, ipu_clkdm */ - clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 0>, <&timer_sys_clk_div>; - clock-names = "fck", "timer_sys_ck"; + clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 0>; + clock-names = "fck"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x20000 0x1000>; @@ -3361,8 +3360,8 @@ target-module@20000 { /* 0x48820000, ap 5 08.0 */ timer5: timer@0 { compatible = "ti,omap5430-timer"; reg = <0x0 0x80>; - clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 24>; - clock-names = "fck"; + clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 24>, <&timer_sys_clk_div>; + clock-names = "fck", "timer_sys_ck"; interrupts = ; }; }; @@ -3379,9 +3378,8 @@ target-module@22000 { /* 0x48822000, ap 7 24.0 */ , ; /* Domains (P, C): ipu_pwrdm, ipu_clkdm */ - clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 0>, - <&timer_sys_clk_div>; - clock-names = "fck", "timer_sys_ck"; + clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 0>; + clock-names = "fck"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x22000 0x1000>; @@ -3389,8 +3387,8 @@ target-module@22000 { /* 0x48822000, ap 7 24.0 */ timer6: timer@0 { compatible = "ti,omap5430-timer"; reg = <0x0 0x80>; - clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 24>; - clock-names = "fck"; + clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 24>, <&timer_sys_clk_div>; + clock-names = "fck", "timer_sys_ck"; interrupts = ; }; }; @@ -3498,8 +3496,8 @@ target-module@2a000 { /* 0x4882a000, ap 15 10.0 */ timer14: timer@0 { compatible = "ti,omap5430-timer"; reg = <0x0 0x80>; - clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER14_CLKCTRL 24>; - clock-names = "fck"; + clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER14_CLKCTRL 24>, <&timer_sys_clk_div>; + clock-names = "fck", "timer_sys_ck"; interrupts = ; ti,timer-pwm; }; @@ -3526,8 +3524,8 @@ target-module@2c000 { /* 0x4882c000, ap 17 02.0 */ timer15: timer@0 { compatible = "ti,omap5430-timer"; reg = <0x0 0x80>; - clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>; - clock-names = "fck"; + clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>, <&timer_sys_clk_div>; + clock-names = "fck", "timer_sys_ck"; interrupts = ; ti,timer-pwm; }; @@ -3554,8 +3552,8 @@ target-module@2e000 { /* 0x4882e000, ap 19 14.0 */ timer16: timer@0 { compatible = "ti,omap5430-timer"; reg = <0x0 0x80>; - clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>; - clock-names = "fck"; + clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>, <&timer_sys_clk_div>; + clock-names = "fck", "timer_sys_ck"; interrupts = ; ti,timer-pwm; }; diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-s.dtsi 
b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-s.dtsi index f05e91841202..53a25fba34f6 100644 --- a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-s.dtsi +++ b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-s.dtsi @@ -232,13 +232,6 @@ &usdhc2 { status = "okay"; }; -&wdog1 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_wdog>; - fsl,ext-reset-output; - status = "okay"; -}; - &iomuxc { pinctrl-0 = <&pinctrl_reset_out &pinctrl_gpio>; @@ -409,10 +402,4 @@ MX6UL_PAD_NAND_DATA02__USDHC2_DATA2 0x170f9 MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x170f9 >; }; - - pinctrl_wdog: wdoggrp { - fsl,pins = < - MX6UL_PAD_GPIO1_IO09__WDOG1_WDOG_ANY 0x30b0 - >; - }; }; diff --git a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi index a17af4d9bfdf..61ba21a605a8 100644 --- a/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi +++ b/arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi @@ -57,6 +57,13 @@ &qspi { status = "okay"; }; +&wdog1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_wdog>; + fsl,ext-reset-output; + status = "okay"; +}; + &iomuxc { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reset_out>; @@ -106,4 +113,10 @@ pinctrl_reset_out: rstoutgrp { MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09 0x1b0b0 >; }; + + pinctrl_wdog: wdoggrp { + fsl,pins = < + MX6UL_PAD_GPIO1_IO09__WDOG1_WDOG_ANY 0x18b0 + >; + }; }; diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi index e39eee628afd..08a7d3ce383f 100644 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi +++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi @@ -13,8 +13,10 @@ cpcap: pmic@0 { #interrupt-cells = <2>; #address-cells = <1>; #size-cells = <0>; - spi-max-frequency = <3000000>; + spi-max-frequency = <9600000>; spi-cs-high; + spi-cpol; + spi-cpha; cpcap_adc: adc { compatible = "motorola,mapphone-cpcap-adc"; diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts index 8047e8cdb3af..4548d87534e3 100644 --- a/arch/arm/boot/dts/omap4-duovero-parlor.dts +++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts @@ -139,7 +139,7 @@ &gpmc { ethernet@gpmc { reg = <5 0 0xff>; interrupt-parent = <&gpio2>; - interrupts = <12 IRQ_TYPE_EDGE_FALLING>; /* gpio_44 */ + interrupts = <12 IRQ_TYPE_LEVEL_LOW>; /* gpio_44 */ phy-mode = "mii"; diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index 6c2b07f0704d..4400f5f8e099 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -662,6 +662,6 @@ &timer1_target { ti,no-idle; timer@0 { assigned-clocks = <&l4_wkup_clkctrl OMAP4_TIMER1_CLKCTRL 24>; - assigned-clock-parents = <&sys_clkin_ck>; + assigned-clock-parents = <&sys_32k_ck>; }; }; diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi index e6308fb76183..a88ee5294d35 100644 --- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi +++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi @@ -100,79 +100,6 @@ led-8 { }; }; - mcc { - compatible = "arm,vexpress,config-bus"; - arm,vexpress,config-bridge = <&v2m_sysreg>; - - oscclk0 { - /* MCC static memory clock */ - compatible = "arm,vexpress-osc"; - arm,vexpress-sysreg,func = <1 0>; - freq-range = <25000000 60000000>; - #clock-cells = <0>; - clock-output-names = "v2m:oscclk0"; - }; - - v2m_oscclk1: oscclk1 { - /* CLCD clock */ - compatible = "arm,vexpress-osc"; - arm,vexpress-sysreg,func = <1 1>; - freq-range = <23750000 65000000>; - #clock-cells = <0>; - clock-output-names = "v2m:oscclk1"; - }; - - v2m_oscclk2: 
oscclk2 { - /* IO FPGA peripheral clock */ - compatible = "arm,vexpress-osc"; - arm,vexpress-sysreg,func = <1 2>; - freq-range = <24000000 24000000>; - #clock-cells = <0>; - clock-output-names = "v2m:oscclk2"; - }; - - volt-vio { - /* Logic level voltage */ - compatible = "arm,vexpress-volt"; - arm,vexpress-sysreg,func = <2 0>; - regulator-name = "VIO"; - regulator-always-on; - label = "VIO"; - }; - - temp-mcc { - /* MCC internal operating temperature */ - compatible = "arm,vexpress-temp"; - arm,vexpress-sysreg,func = <4 0>; - label = "MCC"; - }; - - reset { - compatible = "arm,vexpress-reset"; - arm,vexpress-sysreg,func = <5 0>; - }; - - muxfpga { - compatible = "arm,vexpress-muxfpga"; - arm,vexpress-sysreg,func = <7 0>; - }; - - shutdown { - compatible = "arm,vexpress-shutdown"; - arm,vexpress-sysreg,func = <8 0>; - }; - - reboot { - compatible = "arm,vexpress-reboot"; - arm,vexpress-sysreg,func = <9 0>; - }; - - dvimode { - compatible = "arm,vexpress-dvimode"; - arm,vexpress-sysreg,func = <11 0>; - }; - }; - bus@8000000 { motherboard-bus { model = "V2M-P1"; @@ -435,6 +362,79 @@ clcd_pads: endpoint { }; }; }; + + mcc { + compatible = "arm,vexpress,config-bus"; + arm,vexpress,config-bridge = <&v2m_sysreg>; + + oscclk0 { + /* MCC static memory clock */ + compatible = "arm,vexpress-osc"; + arm,vexpress-sysreg,func = <1 0>; + freq-range = <25000000 60000000>; + #clock-cells = <0>; + clock-output-names = "v2m:oscclk0"; + }; + + v2m_oscclk1: oscclk1 { + /* CLCD clock */ + compatible = "arm,vexpress-osc"; + arm,vexpress-sysreg,func = <1 1>; + freq-range = <23750000 65000000>; + #clock-cells = <0>; + clock-output-names = "v2m:oscclk1"; + }; + + v2m_oscclk2: oscclk2 { + /* IO FPGA peripheral clock */ + compatible = "arm,vexpress-osc"; + arm,vexpress-sysreg,func = <1 2>; + freq-range = <24000000 24000000>; + #clock-cells = <0>; + clock-output-names = "v2m:oscclk2"; + }; + + volt-vio { + /* Logic level voltage */ + compatible = "arm,vexpress-volt"; + arm,vexpress-sysreg,func = <2 0>; + regulator-name = "VIO"; + regulator-always-on; + label = "VIO"; + }; + + temp-mcc { + /* MCC internal operating temperature */ + compatible = "arm,vexpress-temp"; + arm,vexpress-sysreg,func = <4 0>; + label = "MCC"; + }; + + reset { + compatible = "arm,vexpress-reset"; + arm,vexpress-sysreg,func = <5 0>; + }; + + muxfpga { + compatible = "arm,vexpress-muxfpga"; + arm,vexpress-sysreg,func = <7 0>; + }; + + shutdown { + compatible = "arm,vexpress-shutdown"; + arm,vexpress-sysreg,func = <8 0>; + }; + + reboot { + compatible = "arm,vexpress-reboot"; + arm,vexpress-sysreg,func = <9 0>; + }; + + dvimode { + compatible = "arm,vexpress-dvimode"; + arm,vexpress-sysreg,func = <11 0>; + }; + }; }; }; }; diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h index 84dc0ba822f5..5dcf3c6011b7 100644 --- a/arch/arm/include/asm/efi.h +++ b/arch/arm/include/asm/efi.h @@ -87,4 +87,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base, return dram_base + SZ_512M; } +struct efi_arm_entry_state { + u32 cpsr_before_ebs; + u32 sctlr_before_ebs; + u32 cpsr_after_ebs; + u32 sctlr_after_ebs; +}; + #endif /* _ASM_ARM_EFI_H */ diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index 10499d44964a..9a79ef6b1876 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -84,7 +84,8 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old, old = __opcode_to_mem_arm(old); if (validate) { - if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE)) + if 
(copy_from_kernel_nofault(&replaced, (void *)pc, + MCOUNT_INSN_SIZE)) return -EFAULT; if (replaced != old) diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index 6a95b9296640..7bd30c0a4280 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -236,7 +236,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) /* patch_text() only supports int-sized breakpoints */ BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE); - err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, + err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err) return err; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 65a3b1e75480..17d5a785df28 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -396,7 +396,7 @@ int is_valid_bugaddr(unsigned long pc) u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE); #endif - if (probe_kernel_address((unsigned *)pc, bkpt)) + if (get_kernel_nofault(bkpt, (void *)pc)) return 0; return bkpt == insn; diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index 6aa938b949db..1df0ee01ee02 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -53,6 +53,7 @@ config ARCH_BCM_NSP select ARM_ERRATA_754322 select ARM_ERRATA_775420 select ARM_ERRATA_764369 if SMP + select ARM_TIMER_SP804 select THERMAL select THERMAL_OF help diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c index f057df813f83..e9962b48e30c 100644 --- a/arch/arm/mach-imx/pm-imx5.c +++ b/arch/arm/mach-imx/pm-imx5.c @@ -295,14 +295,14 @@ static int __init imx_suspend_alloc_ocram( if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; - goto put_node; + goto put_device; } ocram_base = gen_pool_alloc(ocram_pool, size); if (!ocram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } phys = gen_pool_virt_to_phys(ocram_pool, ocram_base); @@ -312,6 +312,8 @@ static int __init imx_suspend_alloc_ocram( if (virt_out) *virt_out = virt; +put_device: + put_device(&pdev->dev); put_node: of_node_put(node); diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c index dd34dff13762..40c74b4c4d73 100644 --- a/arch/arm/mach-imx/pm-imx6.c +++ b/arch/arm/mach-imx/pm-imx6.c @@ -493,14 +493,14 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; - goto put_node; + goto put_device; } ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE); if (!ocram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base); @@ -523,7 +523,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat); if (ret) { pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret); - goto put_node; + goto put_device; } ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat); @@ -570,7 +570,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) &imx6_suspend, MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info)); - goto put_node; + goto put_device; pl310_cache_map_failed: iounmap(pm_info->gpc_base.vbase); @@ -580,6 +580,8 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) iounmap(pm_info->src_base.vbase); src_map_failed: iounmap(pm_info->mmdc_base.vbase); 
+put_device: + put_device(&pdev->dev); put_node: of_node_put(node); diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 82706af307de..c630457bb228 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -3489,7 +3489,7 @@ static const struct omap_hwmod_reset dra7_reset_quirks[] = { }; static const struct omap_hwmod_reset omap_reset_quirks[] = { - { .match = "dss", .len = 3, .reset = omap_dss_reset, }, + { .match = "dss_core", .len = 8, .reset = omap_dss_reset, }, { .match = "hdq1w", .len = 5, .reset = omap_hdq1w_reset, }, { .match = "i2c", .len = 3, .reset = omap_i2c_reset, }, { .match = "wd_timer", .len = 8, .reset = omap2_wd_timer_reset, }, diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c index dcb98937fcf5..ffecbf29646f 100644 --- a/arch/arm/mach-sti/board-dt.c +++ b/arch/arm/mach-sti/board-dt.c @@ -20,14 +20,6 @@ static const char *const stih41x_dt_match[] __initconst = { NULL }; -static void sti_l2_write_sec(unsigned long val, unsigned reg) -{ - /* - * We can't write to secure registers as we are in non-secure - * mode, until we have some SMI service available. - */ -} - DT_MACHINE_START(STM, "STi SoC with Flattened Device Tree") .dt_compat = stih41x_dt_match, .l2c_aux_val = L2C_AUX_CTRL_SHARED_OVERRIDE | @@ -36,5 +28,4 @@ DT_MACHINE_START(STM, "STi SoC with Flattened Device Tree") L2C_AUX_CTRL_WAY_SIZE(4), .l2c_aux_mask = 0xc0000fff, .smp = smp_ops(sti_smp_ops), - .l2c_write_sec = sti_l2_write_sec, MACHINE_END diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 84718eddae60..81a627e6e1c5 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -774,7 +774,7 @@ static int alignment_get_arm(struct pt_regs *regs, u32 *ip, u32 *inst) if (user_mode(regs)) fault = get_user(instr, ip); else - fault = probe_kernel_address(ip, instr); + fault = get_kernel_nofault(instr, ip); *inst = __mem_to_opcode_arm(instr); @@ -789,7 +789,7 @@ static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst) if (user_mode(regs)) fault = get_user(instr, ip); else - fault = probe_kernel_address(ip, instr); + fault = get_kernel_nofault(instr, ip); *inst = __mem_to_opcode_thumb16(instr); diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 31380da53689..66dc41fd49f2 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1518,9 +1518,9 @@ config ARM64_PTR_AUTH default y depends on !KVM || ARM64_VHE depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC - # GCC 9.1 and later inserts a .note.gnu.property section note for PAC + # Modern compilers insert a .note.gnu.property section note for PAC # which is only understood by binutils starting with version 2.33.1. 
- depends on !CC_IS_GCC || GCC_VERSION < 90100 || LD_VERSION >= 233010000 + depends on LD_IS_LLD || LD_VERSION >= 233010000 || (CC_IS_GCC && GCC_VERSION < 90100) depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) help @@ -1564,7 +1564,7 @@ config CC_HAS_SIGN_RETURN_ADDRESS def_bool $(cc-option,-msign-return-address=all) config AS_HAS_PAC - def_bool $(as-option,-Wa$(comma)-march=armv8.3-a) + def_bool $(cc-option,-Wa$(comma)-march=armv8.3-a) config AS_HAS_CFI_NEGATE_RA_STATE def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n) @@ -1630,6 +1630,8 @@ config ARM64_BTI_KERNEL depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697 depends on !CC_IS_GCC || GCC_VERSION >= 100100 + # https://reviews.llvm.org/rGb8ae3fdfa579dbf366b1bb1cbfdbf8c51db7fa55 + depends on !CC_IS_CLANG || CLANG_VERSION >= 100001 depends on !(CC_IS_CLANG && GCOV_KERNEL) depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS) help diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index cdf7ec0b975e..265c4461031f 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -8,21 +8,6 @@ config PID_IN_CONTEXTIDR instructions during context switch. Say Y here only if you are planning to use hardware trace tools with this kernel. -config ARM64_RANDOMIZE_TEXT_OFFSET - bool "Randomize TEXT_OFFSET at build time" - help - Say Y here if you want the image load offset (AKA TEXT_OFFSET) - of the kernel to be randomized at build-time. When selected, - this option will cause TEXT_OFFSET to be randomized upon any - build of the kernel, and the offset will be reflected in the - text_offset field of the resulting Image. This can be used to - fuzz-test bootloaders which respect text_offset. - - This option is intended for bootloader and/or kernel testing - only. Bootloaders must make no assumptions regarding the value - of TEXT_OFFSET and platforms must not require a specific - value. - config DEBUG_EFI depends on EFI && DEBUG_INFO bool "UEFI debugging" diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 76359cfb328a..a0d94d063fa8 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -121,13 +121,7 @@ endif head-y := arch/arm64/kernel/head.o # The byte offset of the kernel image in RAM from the start of RAM. 
-ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y) -TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \ - int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \ - rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}") -else TEXT_OFFSET := 0x0 -endif ifeq ($(CONFIG_KASAN_SW_TAGS), y) KASAN_SHADOW_SCALE_SHIFT := 4 diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi index fb0137a8611c..94911b1707ef 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi @@ -136,7 +136,7 @@ buck6_reg: BUCK6 { ldo1_reg: LDO1 { regulator-name = "LDO1"; - regulator-min-microvolt = <3000000>; + regulator-min-microvolt = <1600000>; regulator-max-microvolt = <3300000>; regulator-boot-on; regulator-always-on; @@ -144,7 +144,7 @@ ldo1_reg: LDO1 { ldo2_reg: LDO2 { regulator-name = "LDO2"; - regulator-min-microvolt = <900000>; + regulator-min-microvolt = <800000>; regulator-max-microvolt = <900000>; regulator-boot-on; regulator-always-on; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts index e5ec8322796d..0f1d7f8aeac4 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts @@ -208,7 +208,7 @@ buck6_reg: BUCK6 { ldo1_reg: LDO1 { regulator-name = "LDO1"; - regulator-min-microvolt = <3000000>; + regulator-min-microvolt = <1600000>; regulator-max-microvolt = <3300000>; regulator-boot-on; regulator-always-on; @@ -216,7 +216,7 @@ ldo1_reg: LDO1 { ldo2_reg: LDO2 { regulator-name = "LDO2"; - regulator-min-microvolt = <900000>; + regulator-min-microvolt = <800000>; regulator-max-microvolt = <900000>; regulator-boot-on; regulator-always-on; diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts index d07e0e6a00cc..a1e5483dbbbe 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts @@ -113,7 +113,7 @@ buck6_reg: BUCK6 { ldo1_reg: LDO1 { regulator-name = "LDO1"; - regulator-min-microvolt = <3000000>; + regulator-min-microvolt = <1600000>; regulator-max-microvolt = <3300000>; regulator-boot-on; regulator-always-on; @@ -121,7 +121,7 @@ ldo1_reg: LDO1 { ldo2_reg: LDO2 { regulator-name = "LDO2"; - regulator-min-microvolt = <900000>; + regulator-min-microvolt = <800000>; regulator-max-microvolt = <900000>; regulator-boot-on; regulator-always-on; diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h index 81fefd2a1d02..ba89a9af820a 100644 --- a/arch/arm64/include/asm/linkage.h +++ b/arch/arm64/include/asm/linkage.h @@ -12,7 +12,6 @@ * instead. 
*/ #define BTI_C hint 34 ; -#define BTI_J hint 36 ; /* * When using in-kernel BTI we need to ensure that PCS-conformant assembly @@ -43,11 +42,6 @@ SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \ BTI_C -#define SYM_INNER_LABEL(name, linkage) \ - .type name SYM_T_NONE ASM_NL \ - SYM_ENTRY(name, linkage, SYM_A_NONE) \ - BTI_J - #endif /* diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 68140fdd89d6..8444df000181 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -19,6 +19,9 @@ typedef struct { atomic64_t id; +#ifdef CONFIG_COMPAT + void *sigpage; +#endif void *vdso; unsigned long flags; } mm_context_t; diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 6dbd267ab931..758e2d1577d0 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -416,7 +416,7 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd) __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) #define pgprot_nx(prot) \ - __pgprot_modify(prot, 0, PTE_PXN) + __pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN) /* * Mark the prot value as uncacheable and unbufferable. diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 151f28521f1e..a561cbb91d4d 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -29,9 +29,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE obj-$(CONFIG_COMPAT) += sys32.o signal32.o \ sys_compat.o -ifneq ($(CONFIG_COMPAT_VDSO), y) obj-$(CONFIG_COMPAT) += sigreturn32.o -endif obj-$(CONFIG_KUSER_HELPERS) += kuser32.o obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o obj-$(CONFIG_MODULES) += module.o diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index ad06d6802d2e..cf50c53e9357 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -460,6 +460,8 @@ static const struct midr_range arm64_ssb_cpus[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), {}, }; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4ae41670c2e6..9f63053a63a9 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1290,6 +1290,8 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), { /* sentinel */ } }; char const *str = "kpti command line option"; diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 35cb5e66c504..55c8f3ec6705 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -119,10 +120,20 @@ struct fpsimd_last_state_struct { static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state); /* Default VL for tasks that don't set it explicitly: */ -static int sve_default_vl = -1; +static int __sve_default_vl = -1; + +static int get_sve_default_vl(void) +{ + return READ_ONCE(__sve_default_vl); +} #ifdef CONFIG_ARM64_SVE +static void set_sve_default_vl(int val) +{ + WRITE_ONCE(__sve_default_vl, val); +} + /* Maximum supported vector length across all CPUs (initially poisoned) */ int __ro_after_init sve_max_vl = SVE_VL_MIN; int __ro_after_init sve_max_virtualisable_vl = 
SVE_VL_MIN; @@ -338,13 +349,13 @@ static unsigned int find_supported_vector_length(unsigned int vl) return sve_vl_from_vq(__bit_to_vq(bit)); } -#ifdef CONFIG_SYSCTL +#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL) static int sve_proc_do_default_vl(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; - int vl = sve_default_vl; + int vl = get_sve_default_vl(); struct ctl_table tmp_table = { .data = &vl, .maxlen = sizeof(vl), @@ -361,7 +372,7 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write, if (!sve_vl_valid(vl)) return -EINVAL; - sve_default_vl = find_supported_vector_length(vl); + set_sve_default_vl(find_supported_vector_length(vl)); return 0; } @@ -383,9 +394,9 @@ static int __init sve_sysctl_init(void) return 0; } -#else /* ! CONFIG_SYSCTL */ +#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */ static int __init sve_sysctl_init(void) { return 0; } -#endif /* ! CONFIG_SYSCTL */ +#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */ #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \ (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET)) @@ -868,7 +879,7 @@ void __init sve_setup(void) * For the default VL, pick the maximum supported value <= 64. * VL == 64 is guaranteed not to grow the signal frame. */ - sve_default_vl = find_supported_vector_length(64); + set_sve_default_vl(find_supported_vector_length(64)); bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX); @@ -889,7 +900,7 @@ void __init sve_setup(void) pr_info("SVE: maximum available vector length %u bytes per vector\n", sve_max_vl); pr_info("SVE: default vector length %u bytes per vector\n", - sve_default_vl); + get_sve_default_vl()); /* KVM decides whether to support mismatched systems. Just warn here: */ if (sve_max_virtualisable_vl < sve_max_vl) @@ -1029,13 +1040,13 @@ void fpsimd_flush_thread(void) * vector length configured: no kernel task can become a user * task without an exec and hence a call to this function. * By the time the first call to this function is made, all - * early hardware probing is complete, so sve_default_vl + * early hardware probing is complete, so __sve_default_vl * should be valid. * If a bug causes this to go wrong, we make some noise and * try to fudge thread.sve_vl to a safe value here. */ vl = current->thread.sve_vl_onexec ? - current->thread.sve_vl_onexec : sve_default_vl; + current->thread.sve_vl_onexec : get_sve_default_vl(); if (WARN_ON(!sve_vl_valid(vl))) vl = SVE_VL_MIN; diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 0b727edf4104..af234a1e08b7 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -730,6 +730,27 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val, return 0; } +static int watchpoint_report(struct perf_event *wp, unsigned long addr, + struct pt_regs *regs) +{ + int step = is_default_overflow_handler(wp); + struct arch_hw_breakpoint *info = counter_arch_bp(wp); + + info->trigger = addr; + + /* + * If we triggered a user watchpoint from a uaccess routine, then + * handle the stepping ourselves since userspace really can't help + * us with this. 
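The fpsimd.c hunks above wrap the default SVE vector length in get/set accessors built on READ_ONCE()/WRITE_ONCE(), so the sysctl writer and readers on other CPUs no longer race on a plain int. The same pattern in isolation, with illustrative names rather than the kernel's:

#include <linux/compiler.h>

static int __example_default_vl = -1;	/* written rarely, read from many paths */

static int example_get_default_vl(void)
{
	/* Pairs with WRITE_ONCE() below; stops the compiler tearing or re-reading. */
	return READ_ONCE(__example_default_vl);
}

static void example_set_default_vl(int val)
{
	WRITE_ONCE(__example_default_vl, val);
}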
+ */ + if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0) + step = 1; + else + perf_bp_event(wp, regs); + + return step; +} + static int watchpoint_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs) { @@ -739,7 +760,6 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, u64 val; struct perf_event *wp, **slots; struct debug_info *debug_info; - struct arch_hw_breakpoint *info; struct arch_hw_breakpoint_ctrl ctrl; slots = this_cpu_ptr(wp_on_reg); @@ -777,25 +797,13 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, if (dist != 0) continue; - info = counter_arch_bp(wp); - info->trigger = addr; - perf_bp_event(wp, regs); - - /* Do we need to handle the stepping? */ - if (is_default_overflow_handler(wp)) - step = 1; + step = watchpoint_report(wp, addr, regs); } - if (min_dist > 0 && min_dist != -1) { - /* No exact match found. */ - wp = slots[closest_match]; - info = counter_arch_bp(wp); - info->trigger = addr; - perf_bp_event(wp, regs); - /* Do we need to handle the stepping? */ - if (is_default_overflow_handler(wp)) - step = 1; - } + /* No exact match found? */ + if (min_dist > 0 && min_dist != -1) + step = watchpoint_report(slots[closest_match], addr, regs); + rcu_read_unlock(); if (!step) diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 684d871ae38d..a107375005bc 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -135,7 +135,7 @@ int __kprobes aarch64_insn_read(void *addr, u32 *insnp) int ret; __le32 val; - ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE); + ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE); if (!ret) *insnp = le32_to_cpu(val); @@ -151,7 +151,7 @@ static int __kprobes __aarch64_insn_write(void *addr, __le32 insn) raw_spin_lock_irqsave(&patch_lock, flags); waddr = patch_map(addr, FIX_TEXT_POKE0); - ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE); + ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE); patch_unmap(FIX_TEXT_POKE0); raw_spin_unlock_irqrestore(&patch_lock, flags); diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 522e6f517ec0..361a1143e09e 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -219,8 +219,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz) MEMBLOCK_NONE, &start, &end, NULL) nr_ranges++; - cmem = kmalloc(sizeof(struct crash_mem) + - sizeof(struct crash_mem_range) * nr_ranges, GFP_KERNEL); + cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL); if (!cmem) return -ENOMEM; diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index 0bbac612146e..666b225aeb3a 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c @@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) return 0; /* - * Compat (i.e. 32 bit) mode: - * - PC has been set in the pt_regs struct in kernel_entry, - * - Handle SP and LR here. + * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but + * we're stuck with it for ABI compatability reasons. + * + * For a 32-bit consumer inspecting a 32-bit task, then it will look at + * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h). + * These correspond directly to a prefix of the registers saved in our + * 'struct pt_regs', with the exception of the PC, so we copy that down + * (x15 corresponds to SP_hyp in the architecture). + * + * So far, so good. 
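Among the hunks above, machine_kexec_file.c replaces an open-coded "header plus N elements" size calculation with struct_size(). A short sketch of that idiom using a made-up structure (the real one is struct crash_mem):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_ranges {
	unsigned int nr;
	struct {
		unsigned long start;
		unsigned long end;
	} range[];			/* flexible array member */
};

static struct example_ranges *example_alloc(unsigned int nr)
{
	struct example_ranges *r;

	/*
	 * struct_size(r, range, nr) is sizeof(*r) + nr * sizeof(r->range[0]),
	 * saturating to SIZE_MAX on overflow so kmalloc() fails cleanly
	 * instead of returning an undersized buffer.
	 */
	r = kmalloc(struct_size(r, range, nr), GFP_KERNEL);
	if (r)
		r->nr = nr;
	return r;
}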
+ * + * The oddity arises when a 64-bit consumer looks at a 32-bit task and + * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return + * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and + * PC registers would normally live. The initial idea was to allow a + * 64-bit unwinder to unwind a 32-bit task and, although it's not clear + * how well that works in practice, somebody might be relying on it. + * + * At the time we make a sample, we don't know whether the consumer is + * 32-bit or 64-bit, so we have to cater for both possibilities. */ if (compat_user_mode(regs)) { if ((u32)idx == PERF_REG_ARM64_SP) return regs->compat_sp; if ((u32)idx == PERF_REG_ARM64_LR) return regs->compat_lr; + if (idx == 15) + return regs->pc; } if ((u32)idx == PERF_REG_ARM64_SP) diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index d1c95dcf1d78..cbe49cd117cf 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -120,15 +120,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) void *alloc_insn_page(void) { - void *page; - - page = vmalloc_exec(PAGE_SIZE); - if (page) { - set_memory_ro((unsigned long)page, 1); - set_vm_flush_reset_perms(page); - } - - return page; + return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END, + GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS, + NUMA_NO_NODE, __func__); } /* arm kprobe: install breakpoint in text */ diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 82feca6f7052..2f507f565c48 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -342,38 +342,13 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, retcode = ptr_to_compat(ka->sa.sa_restorer); } else { /* Set up sigreturn pointer */ -#ifdef CONFIG_COMPAT_VDSO - void *vdso_base = current->mm->context.vdso; - void *vdso_trampoline; - - if (ka->sa.sa_flags & SA_SIGINFO) { - if (thumb) { - vdso_trampoline = VDSO_SYMBOL(vdso_base, - compat_rt_sigreturn_thumb); - } else { - vdso_trampoline = VDSO_SYMBOL(vdso_base, - compat_rt_sigreturn_arm); - } - } else { - if (thumb) { - vdso_trampoline = VDSO_SYMBOL(vdso_base, - compat_sigreturn_thumb); - } else { - vdso_trampoline = VDSO_SYMBOL(vdso_base, - compat_sigreturn_arm); - } - } - - retcode = ptr_to_compat(vdso_trampoline) + thumb; -#else unsigned int idx = thumb << 1; if (ka->sa.sa_flags & SA_SIGINFO) idx += 3; - retcode = (unsigned long)current->mm->context.vdso + + retcode = (unsigned long)current->mm->context.sigpage + (idx << 2) + thumb; -#endif } regs->regs[0] = usig; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 50cc30acf106..47f651df781c 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -376,7 +376,7 @@ static int call_undef_hook(struct pt_regs *regs) if (!user_mode(regs)) { __le32 instr_le; - if (probe_kernel_address((__force __le32 *)pc, instr_le)) + if (get_kernel_nofault(instr_le, (__force __le32 *)pc)) goto exit; instr = le32_to_cpu(instr_le); } else if (compat_thumb_mode(regs)) { @@ -813,6 +813,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) handler[reason], smp_processor_id(), esr, esr_get_class_string(esr)); + __show_regs(regs); local_daif_mask(); panic("bad mode"); } diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 4e016574bd91..e546df0efefb 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -191,15 +191,12 @@ enum aarch32_map { #ifdef 
CONFIG_COMPAT_VDSO AA32_MAP_VVAR, AA32_MAP_VDSO, -#else - AA32_MAP_SIGPAGE #endif + AA32_MAP_SIGPAGE }; static struct page *aarch32_vectors_page __ro_after_init; -#ifndef CONFIG_COMPAT_VDSO static struct page *aarch32_sig_page __ro_after_init; -#endif static struct vm_special_mapping aarch32_vdso_maps[] = { [AA32_MAP_VECTORS] = { @@ -214,12 +211,11 @@ static struct vm_special_mapping aarch32_vdso_maps[] = { .name = "[vdso]", .mremap = aarch32_vdso_mremap, }, -#else +#endif /* CONFIG_COMPAT_VDSO */ [AA32_MAP_SIGPAGE] = { .name = "[sigpage]", /* ABI */ .pages = &aarch32_sig_page, }, -#endif /* CONFIG_COMPAT_VDSO */ }; static int aarch32_alloc_kuser_vdso_page(void) @@ -242,27 +238,11 @@ static int aarch32_alloc_kuser_vdso_page(void) return 0; } -#ifdef CONFIG_COMPAT_VDSO -static int __aarch32_alloc_vdso_pages(void) -{ - int ret; - - vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR]; - vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO]; - - ret = __vdso_init(VDSO_ABI_AA32); - if (ret) - return ret; - - return aarch32_alloc_kuser_vdso_page(); -} -#else -static int __aarch32_alloc_vdso_pages(void) +static int aarch32_alloc_sigpage(void) { extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[]; int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start; unsigned long sigpage; - int ret; sigpage = get_zeroed_page(GFP_ATOMIC); if (!sigpage) @@ -271,18 +251,34 @@ static int __aarch32_alloc_vdso_pages(void) memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz); aarch32_sig_page = virt_to_page(sigpage); flush_dcache_page(aarch32_sig_page); + return 0; +} - ret = aarch32_alloc_kuser_vdso_page(); - if (ret) - free_page(sigpage); +#ifdef CONFIG_COMPAT_VDSO +static int __aarch32_alloc_vdso_pages(void) +{ + vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR]; + vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO]; - return ret; + return __vdso_init(VDSO_ABI_AA32); } #endif /* CONFIG_COMPAT_VDSO */ static int __init aarch32_alloc_vdso_pages(void) { - return __aarch32_alloc_vdso_pages(); + int ret; + +#ifdef CONFIG_COMPAT_VDSO + ret = __aarch32_alloc_vdso_pages(); + if (ret) + return ret; +#endif + + ret = aarch32_alloc_sigpage(); + if (ret) + return ret; + + return aarch32_alloc_kuser_vdso_page(); } arch_initcall(aarch32_alloc_vdso_pages); @@ -305,7 +301,6 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm) return PTR_ERR_OR_ZERO(ret); } -#ifndef CONFIG_COMPAT_VDSO static int aarch32_sigreturn_setup(struct mm_struct *mm) { unsigned long addr; @@ -328,12 +323,11 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm) if (IS_ERR(ret)) goto out; - mm->context.vdso = (void *)addr; + mm->context.sigpage = (void *)addr; out: return PTR_ERR_OR_ZERO(ret); } -#endif /* !CONFIG_COMPAT_VDSO */ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { @@ -352,10 +346,11 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) mm, bprm, uses_interp); -#else - ret = aarch32_sigreturn_setup(mm); + if (ret) + goto out; #endif /* CONFIG_COMPAT_VDSO */ + ret = aarch32_sigreturn_setup(mm); out: mmap_write_unlock(mm); return ret; diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index 556d424c6f52..45d5cfe46429 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -23,13 +23,14 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti # potential future proofing if we end up with internal calls to the exported # 
routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so # preparation in build-time C")). -ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \ - -Bsymbolic --eh-frame-hdr --build-id -n $(btildflags-y) -T +ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \ + -Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id -n \ + $(btildflags-y) -T ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18 ccflags-y += -DDISABLE_BRANCH_PROFILING -CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) +CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS) KBUILD_CFLAGS += $(DISABLE_LTO) KASAN_SANITIZE := n UBSAN_SANITIZE := n diff --git a/arch/arm64/kernel/vdso/sigreturn.S b/arch/arm64/kernel/vdso/sigreturn.S index 620a3ef837b7..0e18729abc3b 100644 --- a/arch/arm64/kernel/vdso/sigreturn.S +++ b/arch/arm64/kernel/vdso/sigreturn.S @@ -18,29 +18,40 @@ .text +/* + * NOTE!!! You may notice that all of the .cfi directives in this file have + * been commented out. This is because they have been shown to trigger segfaults + * in libgcc when unwinding out of a SIGCANCEL handler to invoke pthread + * cleanup handlers during the thread cancellation dance. By omitting the + * directives, we trigger an arm64-specific fallback path in the unwinder which + * recognises the signal frame and restores many of the registers directly from + * the sigcontext. Re-enabling the cfi directives here therefore needs to be + * much more comprehensive to reduce the risk of further regressions. + */ + /* Ensure that the mysterious NOP can be associated with a function. */ - .cfi_startproc +// .cfi_startproc /* - * .cfi_signal_frame causes the corresponding Frame Description Entry in the - * .eh_frame section to be annotated as a signal frame. This allows DWARF - * unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo(), which permits - * unwinding out of the signal trampoline without the need for the mysterious - * NOP. + * .cfi_signal_frame causes the corresponding Frame Description Entry (FDE) in + * the .eh_frame section to be annotated as a signal frame. This allows DWARF + * unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo() and identify + * the next frame using the unmodified return address instead of subtracting 1, + * which may yield the wrong FDE. */ - .cfi_signal_frame +// .cfi_signal_frame /* * Tell the unwinder where to locate the frame record linking back to the - * interrupted context. We don't provide unwind info for registers other - * than the frame pointer and the link register here; in practice, this - * is sufficient for unwinding in C/C++ based runtimes and the values in - * the sigcontext may have been modified by this point anyway. Debuggers + * interrupted context. We don't provide unwind info for registers other than + * the frame pointer and the link register here; in practice, this is likely to + * be insufficient for unwinding in C/C++ based runtimes, especially without a + * means to restore the stack pointer. Thankfully, unwinders and debuggers * already have baked-in strategies for attempting to unwind out of signals. */ - .cfi_def_cfa x29, 0 - .cfi_offset x29, 0 * 8 - .cfi_offset x30, 1 * 8 +// .cfi_def_cfa x29, 0 +// .cfi_offset x29, 0 * 8 +// .cfi_offset x30, 1 * 8 /* * This mysterious NOP is required for some unwinders (e.g. 
libc++) that @@ -51,16 +62,19 @@ nop // Mysterious NOP /* - * GDB relies on being able to identify the sigreturn instruction sequence to - * unwind from signal handlers. We cannot, therefore, use SYM_FUNC_START() - * here, as it will emit a BTI C instruction and break the unwinder. Thankfully, - * this function is only ever called from a RET and so omitting the landing pad - * is perfectly fine. + * GDB, libgcc and libunwind rely on being able to identify the sigreturn + * instruction sequence to unwind from signal handlers. We cannot, therefore, + * use SYM_FUNC_START() here, as it will emit a BTI C instruction and break the + * unwinder. Thankfully, this function is only ever called from a RET and so + * omitting the landing pad is perfectly fine. */ SYM_CODE_START(__kernel_rt_sigreturn) +// PLEASE DO NOT MODIFY mov x8, #__NR_rt_sigreturn +// PLEASE DO NOT MODIFY svc #0 - .cfi_endproc +// PLEASE DO NOT MODIFY +// .cfi_endproc SYM_CODE_END(__kernel_rt_sigreturn) emit_aarch64_feature_1_and diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 7ea1e827e505..d88148bef6b0 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -140,7 +140,6 @@ hostprogs := $(munge) c-obj-vdso := note.o c-obj-vdso-gettimeofday := vgettimeofday.o -asm-obj-vdso := sigreturn.o ifneq ($(c-gettimeofday-y),) VDSO_CFLAGS_gettimeofday_o += -include $(c-gettimeofday-y) diff --git a/arch/arm64/kernel/vdso32/sigreturn.S b/arch/arm64/kernel/vdso32/sigreturn.S deleted file mode 100644 index b0091064c3d6..000000000000 --- a/arch/arm64/kernel/vdso32/sigreturn.S +++ /dev/null @@ -1,58 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * This file provides both A32 and T32 versions, in accordance with the - * arm sigreturn code. - * - * Please read the comments in arch/arm64/kernel/vdso/sigreturn.S to - * understand some of the craziness in here. - * - * Copyright (C) 2018 ARM Limited - */ - -#include -#include -#include - - .text - - .arm - .fnstart - .save {r0-r15} - .pad #COMPAT_SIGFRAME_REGS_OFFSET - nop -SYM_CODE_START(__kernel_sigreturn_arm) - mov r7, #__NR_compat_sigreturn - svc #0 - .fnend -SYM_CODE_END(__kernel_sigreturn_arm) - - .fnstart - .save {r0-r15} - .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET - nop -SYM_CODE_START(__kernel_rt_sigreturn_arm) - mov r7, #__NR_compat_rt_sigreturn - svc #0 - .fnend -SYM_CODE_END(__kernel_rt_sigreturn_arm) - - .thumb - .fnstart - .save {r0-r15} - .pad #COMPAT_SIGFRAME_REGS_OFFSET - nop -SYM_CODE_START(__kernel_sigreturn_thumb) - mov r7, #__NR_compat_sigreturn - svc #0 - .fnend -SYM_CODE_END(__kernel_sigreturn_thumb) - - .fnstart - .save {r0-r15} - .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET - nop -SYM_CODE_START(__kernel_rt_sigreturn_thumb) - mov r7, #__NR_compat_rt_sigreturn - svc #0 - .fnend -SYM_CODE_END(__kernel_rt_sigreturn_thumb) diff --git a/arch/arm64/kernel/vdso32/vdso.lds.S b/arch/arm64/kernel/vdso32/vdso.lds.S index a3944927eaeb..337d03522048 100644 --- a/arch/arm64/kernel/vdso32/vdso.lds.S +++ b/arch/arm64/kernel/vdso32/vdso.lds.S @@ -64,19 +64,7 @@ VERSION __vdso_clock_gettime; __vdso_gettimeofday; __vdso_clock_getres; - __kernel_sigreturn_arm; - __kernel_sigreturn_thumb; - __kernel_rt_sigreturn_arm; - __kernel_rt_sigreturn_thumb; __vdso_clock_gettime64; local: *; }; } - -/* - * Make the sigreturn code visible to the kernel. 
- */ -VDSO_compat_sigreturn_arm = __kernel_sigreturn_arm; -VDSO_compat_sigreturn_thumb = __kernel_sigreturn_thumb; -VDSO_compat_rt_sigreturn_arm = __kernel_rt_sigreturn_arm; -VDSO_compat_rt_sigreturn_thumb = __kernel_rt_sigreturn_thumb; diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index e631e6425165..1e93cfc7c47a 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -404,11 +404,6 @@ void __init arm64_memblock_init(void) high_memory = __va(memblock_end_of_DRAM() - 1) + 1; dma_contiguous_reserve(arm64_dma32_phys_limit); - -#ifdef CONFIG_ARM64_4K_PAGES - hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); -#endif - } void __init bootmem_init(void) @@ -424,6 +419,16 @@ void __init bootmem_init(void) min_low_pfn = min; arm64_numa_init(); + + /* + * must be done after arm64_numa_init() which calls numa_init() to + * initialize node_online_map that gets used in hugetlb_cma_reserve() + * while allocating required CMA size across online nodes. + */ +#ifdef CONFIG_ARM64_4K_PAGES + hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); +#endif + /* * Sparsemem tries to allocate bootmem in memory_present(), so must be * done after the fixed reservations. diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 990929c8837e..1df25f26571d 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -723,6 +723,7 @@ int kern_addr_valid(unsigned long addr) pmd_t *pmdp, pmd; pte_t *ptep, pte; + addr = arch_kasan_reset_tag(addr); if ((((long)addr) >> VA_BITS) != -1UL) return 0; diff --git a/arch/csky/kernel/ftrace.c b/arch/csky/kernel/ftrace.c index 3c425b84e3be..b4a7ec1517ff 100644 --- a/arch/csky/kernel/ftrace.c +++ b/arch/csky/kernel/ftrace.c @@ -72,7 +72,8 @@ static int ftrace_check_current_nop(unsigned long hook) uint16_t olds[7]; unsigned long hook_pos = hook - 2; - if (probe_kernel_read((void *)olds, (void *)hook_pos, sizeof(nops))) + if (copy_from_kernel_nofault((void *)olds, (void *)hook_pos, + sizeof(nops))) return -EFAULT; if (memcmp((void *)nops, (void *)olds, sizeof(nops))) { @@ -97,7 +98,7 @@ static int ftrace_modify_code(unsigned long hook, unsigned long target, make_jbsr(target, hook, call, nolr); - ret = probe_kernel_write((void *)hook_pos, enable ? call : nops, + ret = copy_to_kernel_nofault((void *)hook_pos, enable ? 
call : nops, sizeof(nops)); if (ret) return -EPERM; diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h index cea15f2dd38d..3a033d2008b3 100644 --- a/arch/ia64/include/asm/sections.h +++ b/arch/ia64/include/asm/sections.h @@ -35,7 +35,7 @@ static inline void *dereference_function_descriptor(void *ptr) struct fdesc *desc = ptr; void *p; - if (!probe_kernel_address(&desc->ip, p)) + if (!get_kernel_nofault(p, (void *)&desc->ip)) ptr = p; return ptr; } diff --git a/arch/ia64/kernel/ftrace.c b/arch/ia64/kernel/ftrace.c index cee411e647ca..b2ab2d58fb30 100644 --- a/arch/ia64/kernel/ftrace.c +++ b/arch/ia64/kernel/ftrace.c @@ -108,7 +108,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, goto skip_check; /* read the text we want to modify */ - if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) + if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE)) return -EFAULT; /* Make sure it is what we expect it to be */ @@ -117,7 +117,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, skip_check: /* replace the text with the new text */ - if (probe_kernel_write(((void *)ip), new_code, MCOUNT_INSN_SIZE)) + if (copy_to_kernel_nofault(((void *)ip), new_code, MCOUNT_INSN_SIZE)) return -EPERM; flush_icache_range(ip, ip + MCOUNT_INSN_SIZE); @@ -129,7 +129,7 @@ static int ftrace_make_nop_check(struct dyn_ftrace *rec, unsigned long addr) unsigned char __attribute__((aligned(8))) replaced[MCOUNT_INSN_SIZE]; unsigned long ip = rec->ip; - if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) + if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE)) return -EFAULT; if (rec->flags & FTRACE_FL_CONVERTED) { struct ftrace_call_insn *call_insn, *tmp_call; diff --git a/arch/ia64/kernel/unwind_i.h b/arch/ia64/kernel/unwind_i.h index 67994a7e5816..1dd57ba44327 100644 --- a/arch/ia64/kernel/unwind_i.h +++ b/arch/ia64/kernel/unwind_i.h @@ -42,7 +42,7 @@ enum unw_register_index { struct unw_info_block { u64 header; - u64 desc[0]; /* unwind descriptors */ + u64 desc[]; /* unwind descriptors */ /* personality routine and language-specific data follow behind descriptors */ }; diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c index 6cfae2411c04..d043c2f897fc 100644 --- a/arch/mips/kernel/kprobes.c +++ b/arch/mips/kernel/kprobes.c @@ -86,9 +86,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) goto out; } - if ((probe_kernel_read(&prev_insn, p->addr - 1, - sizeof(mips_instruction)) == 0) && - insn_has_delayslot(prev_insn)) { + if (copy_from_kernel_nofault(&prev_insn, p->addr - 1, + sizeof(mips_instruction)) == 0 && + insn_has_delayslot(prev_insn)) { pr_notice("Kprobes for branch delayslot are not supported\n"); ret = -EINVAL; goto out; diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 521bd5891e84..666d3350b4ac 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -67,7 +67,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { VCPU_STAT("vz_ghfc", vz_ghfc_exits), VCPU_STAT("vz_gpa", vz_gpa_exits), VCPU_STAT("vz_resvd", vz_resvd_exits), +#ifdef CONFIG_CPU_LOONGSON64 VCPU_STAT("vz_cpucfg", vz_cpucfg_exits), +#endif #endif VCPU_STAT("halt_successful_poll", halt_successful_poll), VCPU_STAT("halt_attempted_poll", halt_attempted_poll), diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c index 22ab77ea27ad..3763b3f8c3db 100644 --- a/arch/nds32/kernel/ftrace.c +++ b/arch/nds32/kernel/ftrace.c @@ -131,13 +131,14 @@ static int __ftrace_modify_code(unsigned long pc, 
unsigned long *old_insn, unsigned long orig_insn[3]; if (validate) { - if (probe_kernel_read(orig_insn, (void *)pc, MCOUNT_INSN_SIZE)) + if (copy_from_kernel_nofault(orig_insn, (void *)pc, + MCOUNT_INSN_SIZE)) return -EFAULT; if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE)) return -EINVAL; } - if (probe_kernel_write((void *)pc, new_insn, MCOUNT_INSN_SIZE)) + if (copy_to_kernel_nofault((void *)pc, new_insn, MCOUNT_INSN_SIZE)) return -EPERM; return 0; diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index c152a68811dd..345727638d52 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c @@ -74,8 +74,11 @@ void *arch_dma_set_uncached(void *cpu_addr, size_t size) * We need to iterate through the pages, clearing the dcache for * them and setting the cache-inhibit bit. */ + mmap_read_lock(&init_mm); error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops, NULL); + mmap_read_unlock(&init_mm); + if (error) return ERR_PTR(error); return cpu_addr; @@ -85,9 +88,11 @@ void arch_dma_clear_uncached(void *cpu_addr, size_t size) { unsigned long va = (unsigned long)cpu_addr; + mmap_read_lock(&init_mm); /* walk_page_range shouldn't be able to fail here */ WARN_ON(walk_page_range(&init_mm, va, va + size, &clear_nocache_walk_ops, NULL)); + mmap_read_unlock(&init_mm); } void arch_sync_dma_for_device(phys_addr_t addr, size_t size, diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index b836fc61a24f..1df0f67ed667 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c @@ -172,7 +172,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) ip = (void *)(rec->ip + 4 - size); - ret = probe_kernel_read(insn, ip, size); + ret = copy_from_kernel_nofault(insn, ip, size); if (ret) return ret; diff --git a/arch/parisc/kernel/kgdb.c b/arch/parisc/kernel/kgdb.c index 664278db9b97..c4554ac13eac 100644 --- a/arch/parisc/kernel/kgdb.c +++ b/arch/parisc/kernel/kgdb.c @@ -154,8 +154,8 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) { - int ret = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, - BREAK_INSTR_SIZE); + int ret = copy_from_kernel_nofault(bpt->saved_instr, + (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (ret) return ret; diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 230a6422b99f..b7abb12edd3a 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -293,7 +293,7 @@ void *dereference_function_descriptor(void *ptr) Elf64_Fdesc *desc = ptr; void *p; - if (!probe_kernel_address(&desc->addr, p)) + if (!get_kernel_nofault(p, (void *)&desc->addr)) ptr = p; return ptr; } diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index 94a9fe2702c2..4b75388190b4 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -57,7 +57,7 @@ void * memcpy(void * dst,const void *src, size_t count) EXPORT_SYMBOL(raw_copy_in_user); EXPORT_SYMBOL(memcpy); -bool probe_kernel_read_allowed(const void *unsafe_src, size_t size) +bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) { if ((unsigned long)unsafe_src < PAGE_SIZE) return false; diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index b56f14160ae5..b0afbdd07740 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -205,10 +205,6 @@ static inline void pmd_clear(pmd_t *pmdp) *pmdp = __pmd(0); } 
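The openrisc/kernel/dma.c hunks above bracket walk_page_range() with mmap_read_lock()/mmap_read_unlock() on init_mm, since the page-table walker expects the walked mm's mmap lock to be held. A minimal sketch of that calling convention, with a hypothetical ops table:

#include <linux/mm.h>
#include <linux/pagewalk.h>

/* Assumed to be defined elsewhere with pte_entry/pmd_entry callbacks. */
extern const struct mm_walk_ops example_walk_ops;

static int example_walk_kernel_range(unsigned long start, unsigned long end)
{
	int ret;

	mmap_read_lock(&init_mm);	/* required by walk_page_range() */
	ret = walk_page_range(&init_mm, start, end, &example_walk_ops, NULL);
	mmap_read_unlock(&init_mm);

	return ret;
}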
-/* to find an entry in a page-table-directory */ -#define pgd_index(address) ((address) >> PGDIR_SHIFT) -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) - /* * PTE updates. This function is called whenever an existing * valid PTE is updated. This does -not- include set_pte_at() @@ -230,6 +226,8 @@ static inline void pmd_clear(pmd_t *pmdp) * For other page sizes, we have a single entry in the table. */ #ifdef CONFIG_PPC_8xx +static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr); + static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, unsigned long clr, unsigned long set, int huge) { @@ -237,7 +235,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p pte_basic_t old = pte_val(*p); pte_basic_t new = (old & ~(pte_basic_t)clr) | set; int num, i; - pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr); + pmd_t *pmd = pmd_off(mm, addr); if (!huge) num = PAGE_SIZE / SZ_4K; @@ -286,6 +284,16 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, return __pte(pte_update(mm, addr, ptep, ~0, 0, 0)); } +#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES) +#define __HAVE_ARCH_PTEP_GET +static inline pte_t ptep_get(pte_t *ptep) +{ + pte_t pte = {READ_ONCE(ptep->pte), 0, 0, 0}; + + return pte; +} +#endif + #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index d19871763ed4..324d7b298ec3 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -85,7 +85,7 @@ static inline void *dereference_function_descriptor(void *ptr) struct ppc64_opd_entry *desc = ptr; void *p; - if (!probe_kernel_address(&desc->funcaddr, p)) + if (!get_kernel_nofault(p, (void *)&desc->funcaddr)) ptr = p; return ptr; } diff --git a/arch/powerpc/include/uapi/asm/papr_pdsm.h b/arch/powerpc/include/uapi/asm/papr_pdsm.h new file mode 100644 index 000000000000..9ccecc1d6840 --- /dev/null +++ b/arch/powerpc/include/uapi/asm/papr_pdsm.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * PAPR nvDimm Specific Methods (PDSM) and structs for libndctl + * + * (C) Copyright IBM 2020 + * + * Author: Vaibhav Jain + */ + +#ifndef _UAPI_ASM_POWERPC_PAPR_PDSM_H_ +#define _UAPI_ASM_POWERPC_PAPR_PDSM_H_ + +#include +#include + +/* + * PDSM Envelope: + * + * The ioctl ND_CMD_CALL exchange data between user-space and kernel via + * envelope which consists of 2 headers sections and payload sections as + * illustrated below: + * +-----------------+---------------+---------------------------+ + * | 64-Bytes | 8-Bytes | Max 184-Bytes | + * +-----------------+---------------+---------------------------+ + * | ND-HEADER | PDSM-HEADER | PDSM-PAYLOAD | + * +-----------------+---------------+---------------------------+ + * | nd_family | | | + * | nd_size_out | cmd_status | | + * | nd_size_in | reserved | nd_pdsm_payload | + * | nd_command | payload --> | | + * | nd_fw_size | | | + * | nd_payload ---> | | | + * +---------------+-----------------+---------------------------+ + * + * ND Header: + * This is the generic libnvdimm header described as 'struct nd_cmd_pkg' + * which is interpreted by libnvdimm before passed on to papr_scm. 
Important + * member fields used are: + * 'nd_family' : (In) NVDIMM_FAMILY_PAPR_SCM + * 'nd_size_in' : (In) PDSM-HEADER + PDSM-IN-PAYLOAD (usually 0) + * 'nd_size_out' : (In) PDSM-HEADER + PDSM-RETURN-PAYLOAD + * 'nd_command' : (In) One of PAPR_PDSM_XXX + * 'nd_fw_size' : (Out) PDSM-HEADER + size of actual payload returned + * + * PDSM Header: + * This is papr-scm specific header that precedes the payload. This is defined + * as nd_cmd_pdsm_pkg. Following fields aare available in this header: + * + * 'cmd_status' : (Out) Errors if any encountered while servicing PDSM. + * 'reserved' : Not used, reserved for future and should be set to 0. + * 'payload' : A union of all the possible payload structs + * + * PDSM Payload: + * + * The layout of the PDSM Payload is defined by various structs shared between + * papr_scm and libndctl so that contents of payload can be interpreted. As such + * its defined as a union of all possible payload structs as + * 'union nd_pdsm_payload'. Based on the value of 'nd_cmd_pkg.nd_command' + * appropriate member of the union is accessed. + */ + +/* Max payload size that we can handle */ +#define ND_PDSM_PAYLOAD_MAX_SIZE 184 + +/* Max payload size that we can handle */ +#define ND_PDSM_HDR_SIZE \ + (sizeof(struct nd_pkg_pdsm) - ND_PDSM_PAYLOAD_MAX_SIZE) + +/* Various nvdimm health indicators */ +#define PAPR_PDSM_DIMM_HEALTHY 0 +#define PAPR_PDSM_DIMM_UNHEALTHY 1 +#define PAPR_PDSM_DIMM_CRITICAL 2 +#define PAPR_PDSM_DIMM_FATAL 3 + +/* + * Struct exchanged between kernel & ndctl in for PAPR_PDSM_HEALTH + * Various flags indicate the health status of the dimm. + * + * extension_flags : Any extension fields present in the struct. + * dimm_unarmed : Dimm not armed. So contents wont persist. + * dimm_bad_shutdown : Previous shutdown did not persist contents. + * dimm_bad_restore : Contents from previous shutdown werent restored. + * dimm_scrubbed : Contents of the dimm have been scrubbed. + * dimm_locked : Contents of the dimm cant be modified until CEC reboot + * dimm_encrypted : Contents of dimm are encrypted. + * dimm_health : Dimm health indicator. One of PAPR_PDSM_DIMM_XXXX + */ +struct nd_papr_pdsm_health { + union { + struct { + __u32 extension_flags; + __u8 dimm_unarmed; + __u8 dimm_bad_shutdown; + __u8 dimm_bad_restore; + __u8 dimm_scrubbed; + __u8 dimm_locked; + __u8 dimm_encrypted; + __u16 dimm_health; + }; + __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; + }; +}; + +/* + * Methods to be embedded in ND_CMD_CALL request. These are sent to the kernel + * via 'nd_cmd_pkg.nd_command' member of the ioctl struct + */ +enum papr_pdsm { + PAPR_PDSM_MIN = 0x0, + PAPR_PDSM_HEALTH, + PAPR_PDSM_MAX, +}; + +/* Maximal union that can hold all possible payload types */ +union nd_pdsm_payload { + struct nd_papr_pdsm_health health; + __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; +} __packed; + +/* + * PDSM-header + payload expected with ND_CMD_CALL ioctl from libnvdimm + * Valid member of union 'payload' is identified via 'nd_cmd_pkg.nd_command' + * that should always precede this struct when sent to papr_scm via CMD_CALL + * interface. 
+ */ +struct nd_pkg_pdsm { + __s32 cmd_status; /* Out: Sub-cmd status returned back */ + __u16 reserved[2]; /* Ignored and to be set as '0' */ + union nd_pdsm_payload payload; +} __packed; + +#endif /* _UAPI_ASM_POWERPC_PAPR_PDSM_H_ */ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index e70ebb5c318c..fa080694e581 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -270,7 +270,7 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_CFAR) .endif - ld r10,PACA_EXGEN+EX_CTR(r13) + ld r10,IAREA+EX_CTR(r13) mtctr r10 BEGIN_FTR_SECTION ld r10,IAREA+EX_PPR(r13) @@ -298,7 +298,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) .if IKVM_SKIP 89: mtocrf 0x80,r9 - ld r10,PACA_EXGEN+EX_CTR(r13) + ld r10,IAREA+EX_CTR(r13) mtctr r10 ld r9,IAREA+EX_R9(r13) ld r10,IAREA+EX_R10(r13) diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index 652b2852bea3..409080208a6c 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -421,7 +421,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) unsigned int instr; struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr; - err = probe_kernel_address(addr, instr); + err = get_kernel_nofault(instr, (unsigned *) addr); if (err) return err; diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 6f96f65ebfe8..9cc792a3a6a9 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -289,7 +289,7 @@ int kprobe_handler(struct pt_regs *regs) if (!p) { unsigned int instr; - if (probe_kernel_address(addr, instr)) + if (get_kernel_nofault(instr, addr)) goto no_kprobe; if (instr != BREAKPOINT_INSTRUCTION) { diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index f4c2fa190192..ae2b188365b1 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -756,7 +756,8 @@ int module_trampoline_target(struct module *mod, unsigned long addr, stub = (struct ppc64_stub_entry *)addr; - if (probe_kernel_read(&magic, &stub->magic, sizeof(magic))) { + if (copy_from_kernel_nofault(&magic, &stub->magic, + sizeof(magic))) { pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name); return -EFAULT; } @@ -766,7 +767,8 @@ int module_trampoline_target(struct module *mod, unsigned long addr, return -EFAULT; } - if (probe_kernel_read(&funcdata, &stub->funcdata, sizeof(funcdata))) { + if (copy_from_kernel_nofault(&funcdata, &stub->funcdata, + sizeof(funcdata))) { pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name); return -EFAULT; } diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 7bb7faf84490..4650b9bb217f 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1252,29 +1252,31 @@ struct task_struct *__switch_to(struct task_struct *prev, static void show_instructions(struct pt_regs *regs) { int i; + unsigned long nip = regs->nip; unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int)); printk("Instruction dump:"); + /* + * If we were executing with the MMU off for instructions, adjust pc + * rather than printing XXXXXXXX. 
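Given the envelope layout documented in papr_pdsm.h above, a user-space consumer (normally libndctl) embeds a struct nd_pkg_pdsm inside the generic struct nd_cmd_pkg and submits it through the ND_CMD_CALL ioctl path. A hedged user-space sketch of how such a request could be sized and filled, assuming the uapi headers are installed; this is illustrative and not ndctl's actual code:

#include <stdlib.h>
#include <linux/ndctl.h>
#include <asm/papr_pdsm.h>

/* Build a PAPR_PDSM_HEALTH request; the caller issues it via ND_CMD_CALL. */
static struct nd_cmd_pkg *build_health_request(void)
{
	size_t out = ND_PDSM_HDR_SIZE + sizeof(struct nd_papr_pdsm_health);
	struct nd_cmd_pkg *pkg = calloc(1, sizeof(*pkg) + out);

	if (!pkg)
		return NULL;

	pkg->nd_family = NVDIMM_FAMILY_PAPR;	/* family checked by is_cmd_valid() */
	pkg->nd_command = PAPR_PDSM_HEALTH;
	pkg->nd_size_in = 0;			/* no input payload */
	pkg->nd_size_out = out;			/* PDSM header + health struct */

	/*
	 * pkg->nd_payload starts with struct nd_pkg_pdsm; cmd_status and
	 * payload.health are filled in by the kernel on return.
	 */
	return pkg;
}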
+ */ + if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) { + pc = (unsigned long)phys_to_virt(pc); + nip = (unsigned long)phys_to_virt(regs->nip); + } + for (i = 0; i < NR_INSN_TO_PRINT; i++) { int instr; if (!(i % 8)) pr_cont("\n"); -#if !defined(CONFIG_BOOKE) - /* If executing with the IMMU off, adjust pc rather - * than print XXXXXXXX. - */ - if (!(regs->msr & MSR_IR)) - pc = (unsigned long)phys_to_virt(pc); -#endif - if (!__kernel_text_address(pc) || - probe_kernel_address((const void *)pc, instr)) { + get_kernel_nofault(instr, (const void *)pc)) { pr_cont("XXXXXXXX "); } else { - if (regs->nip == pc) + if (nip == pc) pr_cont("<%08x> ", instr); else pr_cont("%08x ", instr); @@ -1305,7 +1307,8 @@ void show_user_instructions(struct pt_regs *regs) for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) { int instr; - if (probe_user_read(&instr, (void __user *)pc, sizeof(instr))) { + if (copy_from_user_nofault(&instr, (void __user *)pc, + sizeof(instr))) { seq_buf_printf(&s, "XXXXXXXX "); continue; } diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 5e399628f51a..c1fede6ec934 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -226,7 +226,7 @@ __ftrace_make_nop(struct module *mod, unsigned long ip = rec->ip; unsigned long tramp; - if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE)) + if (copy_from_kernel_nofault(&op, (void *)ip, MCOUNT_INSN_SIZE)) return -EFAULT; /* Make sure that that this is still a 24bit jump */ @@ -249,7 +249,7 @@ __ftrace_make_nop(struct module *mod, pr_devel("ip:%lx jumps to %lx", ip, tramp); /* Find where the trampoline jumps to */ - if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { + if (copy_from_kernel_nofault(jmp, (void *)tramp, sizeof(jmp))) { pr_err("Failed to read %lx\n", tramp); return -EFAULT; } diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 3cb0c9843d01..6a73714759ba 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -40,7 +40,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid, /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */ if (kvmhv_on_pseries()) return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr, - __pa(to), __pa(from), n); + (to != NULL) ? __pa(to): 0, + (from != NULL) ? 
__pa(from): 0, n); quadrant = 1; if (!pid) @@ -64,9 +65,9 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid, isync(); if (is_load) - ret = probe_user_read(to, (const void __user *)from, n); + ret = copy_from_user_nofault(to, (const void __user *)from, n); else - ret = probe_user_write((void __user *)to, from, n); + ret = copy_to_user_nofault((void __user *)to, from, n); /* switch the pid first to avoid running host with unallocated pid */ if (quadrant == 1 && pid != old_pid) diff --git a/arch/powerpc/lib/inst.c b/arch/powerpc/lib/inst.c index aedfd6e31e53..9cc17eb62462 100644 --- a/arch/powerpc/lib/inst.c +++ b/arch/powerpc/lib/inst.c @@ -15,11 +15,11 @@ int probe_user_read_inst(struct ppc_inst *inst, unsigned int val, suffix; int err; - err = probe_user_read(&val, nip, sizeof(val)); + err = copy_from_user_nofault(&val, nip, sizeof(val)); if (err) return err; if (get_op(val) == OP_PREFIX) { - err = probe_user_read(&suffix, (void __user *)nip + 4, 4); + err = copy_from_user_nofault(&suffix, (void __user *)nip + 4, 4); *inst = ppc_inst_prefix(val, suffix); } else { *inst = ppc_inst(val); @@ -33,11 +33,11 @@ int probe_kernel_read_inst(struct ppc_inst *inst, unsigned int val, suffix; int err; - err = probe_kernel_read(&val, src, sizeof(val)); + err = copy_from_kernel_nofault(&val, src, sizeof(val)); if (err) return err; if (get_op(val) == OP_PREFIX) { - err = probe_kernel_read(&suffix, (void *)src + 4, 4); + err = copy_from_kernel_nofault(&suffix, (void *)src + 4, 4); *inst = ppc_inst_prefix(val, suffix); } else { *inst = ppc_inst(val); @@ -51,7 +51,7 @@ int probe_user_read_inst(struct ppc_inst *inst, unsigned int val; int err; - err = probe_user_read(&val, nip, sizeof(val)); + err = copy_from_user_nofault(&val, nip, sizeof(val)); if (!err) *inst = ppc_inst(val); @@ -64,7 +64,7 @@ int probe_kernel_read_inst(struct ppc_inst *inst, unsigned int val; int err; - err = probe_kernel_read(&val, src, sizeof(val)); + err = copy_from_kernel_nofault(&val, src, sizeof(val)); if (!err) *inst = ppc_inst(val); diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c index 4a75f2d9bf0e..bce0e5349978 100644 --- a/arch/powerpc/mm/nohash/kaslr_booke.c +++ b/arch/powerpc/mm/nohash/kaslr_booke.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c index 6f347fa29f41..9db7ada79d10 100644 --- a/arch/powerpc/oprofile/backtrace.c +++ b/arch/powerpc/oprofile/backtrace.c @@ -33,7 +33,8 @@ static unsigned int user_getsp32(unsigned int sp, int is_first) * which means that we've done all that we can do from * interrupt context. 
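The oprofile and perf callchain hunks above move to copy_from_user_nofault(), which copies from user memory without sleeping or faulting pages in, exactly what a sampling interrupt needs. A minimal sketch of the pattern (the helper name is invented):

#include <linux/uaccess.h>

/* Read one saved frame (back chain plus LR) from a user stack pointer. */
static int example_read_user_frame(unsigned long sp, unsigned long frame[2])
{
	/* Reject null or unaligned stack pointers up front. */
	if (!sp || sp & (sizeof(unsigned long) - 1))
		return -EFAULT;

	/* Returns 0 on success, -EFAULT on any fault or bad address. */
	return copy_from_user_nofault(frame, (const void __user *)sp,
				      2 * sizeof(unsigned long));
}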
*/ - if (probe_user_read(stack_frame, (void __user *)p, sizeof(stack_frame))) + if (copy_from_user_nofault(stack_frame, (void __user *)p, + sizeof(stack_frame))) return 0; if (!is_first) @@ -51,7 +52,8 @@ static unsigned long user_getsp64(unsigned long sp, int is_first) { unsigned long stack_frame[3]; - if (probe_user_read(stack_frame, (void __user *)sp, sizeof(stack_frame))) + if (copy_from_user_nofault(stack_frame, (void __user *)sp, + sizeof(stack_frame))) return 0; if (!is_first) diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c index f7d888d39cd3..542e68b8eae0 100644 --- a/arch/powerpc/perf/callchain_32.c +++ b/arch/powerpc/perf/callchain_32.c @@ -44,7 +44,7 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) ((unsigned long)ptr & 3)) return -EFAULT; - rc = probe_user_read(ret, ptr, sizeof(*ret)); + rc = copy_from_user_nofault(ret, ptr, sizeof(*ret)); if (IS_ENABLED(CONFIG_PPC64) && rc) return read_user_stack_slow(ptr, ret, 4); diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c index 814d1c2c2b9c..fa2a1b83b9b0 100644 --- a/arch/powerpc/perf/callchain_64.c +++ b/arch/powerpc/perf/callchain_64.c @@ -50,7 +50,7 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret) ((unsigned long)ptr & 7)) return -EFAULT; - if (!probe_user_read(ret, ptr, sizeof(*ret))) + if (!copy_from_user_nofault(ret, ptr, sizeof(*ret))) return 0; return read_user_stack_slow(ptr, ret, 8); diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 13b9dd5e4a76..cd6a742ac6ef 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -418,14 +418,16 @@ static __u64 power_pmu_bhrb_to(u64 addr) __u64 target; if (is_kernel_addr(addr)) { - if (probe_kernel_read(&instr, (void *)addr, sizeof(instr))) + if (copy_from_kernel_nofault(&instr, (void *)addr, + sizeof(instr))) return 0; return branch_target((struct ppc_inst *)&instr); } /* Userspace: need copy instruction here then translate it */ - if (probe_user_read(&instr, (unsigned int __user *)addr, sizeof(instr))) + if (copy_from_user_nofault(&instr, (unsigned int __user *)addr, + sizeof(instr))) return 0; target = branch_target((struct ppc_inst *)&instr); diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c index cbee3666da07..abdef9bcf432 100644 --- a/arch/powerpc/platforms/cell/spu_callbacks.c +++ b/arch/powerpc/platforms/cell/spu_callbacks.c @@ -35,7 +35,7 @@ */ static void *spu_syscall_table[] = { -#define __SYSCALL(nr, entry) entry, +#define __SYSCALL(nr, entry) [nr] = entry, #include #undef __SYSCALL }; diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index f35592423380..9c569078a09f 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -12,16 +12,57 @@ #include #include #include +#include #include +#include #define BIND_ANY_ADDR (~0ul) #define PAPR_SCM_DIMM_CMD_MASK \ ((1ul << ND_CMD_GET_CONFIG_SIZE) | \ (1ul << ND_CMD_GET_CONFIG_DATA) | \ - (1ul << ND_CMD_SET_CONFIG_DATA)) + (1ul << ND_CMD_SET_CONFIG_DATA) | \ + (1ul << ND_CMD_CALL)) +/* DIMM health bitmap bitmap indicators */ +/* SCM device is unable to persist memory contents */ +#define PAPR_PMEM_UNARMED (1ULL << (63 - 0)) +/* SCM device failed to persist memory contents */ +#define PAPR_PMEM_SHUTDOWN_DIRTY (1ULL << (63 - 1)) +/* SCM device contents are persisted from previous IPL */ +#define 
PAPR_PMEM_SHUTDOWN_CLEAN (1ULL << (63 - 2)) +/* SCM device contents are not persisted from previous IPL */ +#define PAPR_PMEM_EMPTY (1ULL << (63 - 3)) +/* SCM device memory life remaining is critically low */ +#define PAPR_PMEM_HEALTH_CRITICAL (1ULL << (63 - 4)) +/* SCM device will be garded off next IPL due to failure */ +#define PAPR_PMEM_HEALTH_FATAL (1ULL << (63 - 5)) +/* SCM contents cannot persist due to current platform health status */ +#define PAPR_PMEM_HEALTH_UNHEALTHY (1ULL << (63 - 6)) +/* SCM device is unable to persist memory contents in certain conditions */ +#define PAPR_PMEM_HEALTH_NON_CRITICAL (1ULL << (63 - 7)) +/* SCM device is encrypted */ +#define PAPR_PMEM_ENCRYPTED (1ULL << (63 - 8)) +/* SCM device has been scrubbed and locked */ +#define PAPR_PMEM_SCRUBBED_AND_LOCKED (1ULL << (63 - 9)) + +/* Bits status indicators for health bitmap indicating unarmed dimm */ +#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED | \ + PAPR_PMEM_HEALTH_UNHEALTHY) + +/* Bits status indicators for health bitmap indicating unflushed dimm */ +#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY) + +/* Bits status indicators for health bitmap indicating unrestored dimm */ +#define PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY) + +/* Bit status indicators for smart event notification */ +#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \ + PAPR_PMEM_HEALTH_FATAL | \ + PAPR_PMEM_HEALTH_UNHEALTHY) + +/* private struct associated with each region */ struct papr_scm_priv { struct platform_device *pdev; struct device_node *dn; @@ -39,6 +80,15 @@ struct papr_scm_priv { struct resource res; struct nd_region *region; struct nd_interleave_set nd_set; + + /* Protect dimm health data from concurrent read/writes */ + struct mutex health_mutex; + + /* Last time the health information of the dimm was updated */ + unsigned long lasthealth_jiffies; + + /* Health information for the dimm */ + u64 health_bitmap; }; static int drc_pmem_bind(struct papr_scm_priv *p) @@ -144,6 +194,61 @@ static int drc_pmem_query_n_bind(struct papr_scm_priv *p) return drc_pmem_bind(p); } +/* + * Issue hcall to retrieve dimm health info and populate papr_scm_priv with the + * health information. + */ +static int __drc_pmem_query_health(struct papr_scm_priv *p) +{ + unsigned long ret[PLPAR_HCALL_BUFSIZE]; + long rc; + + /* issue the hcall */ + rc = plpar_hcall(H_SCM_HEALTH, ret, p->drc_index); + if (rc != H_SUCCESS) { + dev_err(&p->pdev->dev, + "Failed to query health information, Err:%ld\n", rc); + return -ENXIO; + } + + p->lasthealth_jiffies = jiffies; + p->health_bitmap = ret[0] & ret[1]; + + dev_dbg(&p->pdev->dev, + "Queried dimm health info. 
Bitmap:0x%016lx Mask:0x%016lx\n",
+		ret[0], ret[1]);
+
+	return 0;
+}
+
+/* Min interval in seconds for assuming stable dimm health */
+#define MIN_HEALTH_QUERY_INTERVAL 60
+
+/* Query cached health info and if needed call drc_pmem_query_health */
+static int drc_pmem_query_health(struct papr_scm_priv *p)
+{
+	unsigned long cache_timeout;
+	int rc;
+
+	/* Protect concurrent modifications to papr_scm_priv */
+	rc = mutex_lock_interruptible(&p->health_mutex);
+	if (rc)
+		return rc;
+
+	/* Jiffies offset for which the health data is assumed to be the same */
+	cache_timeout = p->lasthealth_jiffies +
+		msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000);
+
+	/* Fetch new health info if it is older than MIN_HEALTH_QUERY_INTERVAL */
+	if (time_after(jiffies, cache_timeout))
+		rc = __drc_pmem_query_health(p);
+	else
+		/* Assume cached health data is valid */
+		rc = 0;
+
+	mutex_unlock(&p->health_mutex);
+	return rc;
+}
 
 static int papr_scm_meta_get(struct papr_scm_priv *p,
 			struct nd_cmd_get_config_data_hdr *hdr)
@@ -246,16 +351,250 @@ static int papr_scm_meta_set(struct papr_scm_priv *p,
 	return 0;
 }
 
+/*
+ * Do sanity checks on the input args to the dimm-control function and return
+ * '0' if valid. Validation of PDSM payloads happens later in
+ * papr_scm_service_pdsm.
+ */
+static int is_cmd_valid(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+			unsigned int buf_len)
+{
+	unsigned long cmd_mask = PAPR_SCM_DIMM_CMD_MASK;
+	struct nd_cmd_pkg *nd_cmd;
+	struct papr_scm_priv *p;
+	enum papr_pdsm pdsm;
+
+	/* Only dimm-specific calls are supported atm */
+	if (!nvdimm)
+		return -EINVAL;
+
+	/* get the provider data from struct nvdimm */
+	p = nvdimm_provider_data(nvdimm);
+
+	if (!test_bit(cmd, &cmd_mask)) {
+		dev_dbg(&p->pdev->dev, "Unsupported cmd=%u\n", cmd);
+		return -EINVAL;
+	}
+
+	/* For CMD_CALL verify pdsm request */
+	if (cmd == ND_CMD_CALL) {
+		/* Verify the envelope and envelope size */
+		if (!buf ||
+		    buf_len < (sizeof(struct nd_cmd_pkg) + ND_PDSM_HDR_SIZE)) {
+			dev_dbg(&p->pdev->dev, "Invalid pkg size=%u\n",
+				buf_len);
+			return -EINVAL;
+		}
+
+		/* Verify that the nd_cmd_pkg.nd_family is correct */
+		nd_cmd = (struct nd_cmd_pkg *)buf;
+
+		if (nd_cmd->nd_family != NVDIMM_FAMILY_PAPR) {
+			dev_dbg(&p->pdev->dev, "Invalid pkg family=0x%llx\n",
+				nd_cmd->nd_family);
+			return -EINVAL;
+		}
+
+		pdsm = (enum papr_pdsm)nd_cmd->nd_command;
+
+		/* Verify if the pdsm command is valid */
+		if (pdsm <= PAPR_PDSM_MIN || pdsm >= PAPR_PDSM_MAX) {
+			dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid PDSM\n",
+				pdsm);
+			return -EINVAL;
+		}
+
+		/* Have enough space to hold returned 'nd_pkg_pdsm' header */
+		if (nd_cmd->nd_size_out < ND_PDSM_HDR_SIZE) {
+			dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid payload\n",
+				pdsm);
+			return -EINVAL;
+		}
+	}
+
+	/* Let the command be further processed */
+	return 0;
+}
+
+/* Fetch the DIMM health info and populate it in the provided package.
*/ +static int papr_pdsm_health(struct papr_scm_priv *p, + union nd_pdsm_payload *payload) +{ + int rc; + + /* Ensure dimm health mutex is taken preventing concurrent access */ + rc = mutex_lock_interruptible(&p->health_mutex); + if (rc) + goto out; + + /* Always fetch upto date dimm health data ignoring cached values */ + rc = __drc_pmem_query_health(p); + if (rc) { + mutex_unlock(&p->health_mutex); + goto out; + } + + /* update health struct with various flags derived from health bitmap */ + payload->health = (struct nd_papr_pdsm_health) { + .extension_flags = 0, + .dimm_unarmed = !!(p->health_bitmap & PAPR_PMEM_UNARMED_MASK), + .dimm_bad_shutdown = !!(p->health_bitmap & PAPR_PMEM_BAD_SHUTDOWN_MASK), + .dimm_bad_restore = !!(p->health_bitmap & PAPR_PMEM_BAD_RESTORE_MASK), + .dimm_scrubbed = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED), + .dimm_locked = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED), + .dimm_encrypted = !!(p->health_bitmap & PAPR_PMEM_ENCRYPTED), + .dimm_health = PAPR_PDSM_DIMM_HEALTHY, + }; + + /* Update field dimm_health based on health_bitmap flags */ + if (p->health_bitmap & PAPR_PMEM_HEALTH_FATAL) + payload->health.dimm_health = PAPR_PDSM_DIMM_FATAL; + else if (p->health_bitmap & PAPR_PMEM_HEALTH_CRITICAL) + payload->health.dimm_health = PAPR_PDSM_DIMM_CRITICAL; + else if (p->health_bitmap & PAPR_PMEM_HEALTH_UNHEALTHY) + payload->health.dimm_health = PAPR_PDSM_DIMM_UNHEALTHY; + + /* struct populated hence can release the mutex now */ + mutex_unlock(&p->health_mutex); + rc = sizeof(struct nd_papr_pdsm_health); + +out: + return rc; +} + +/* + * 'struct pdsm_cmd_desc' + * Identifies supported PDSMs' expected length of in/out payloads + * and pdsm service function. + * + * size_in : Size of input payload if any in the PDSM request. + * size_out : Size of output payload if any in the PDSM request. + * service : Service function for the PDSM request. Return semantics: + * rc < 0 : Error servicing PDSM and rc indicates the error. + * rc >=0 : Serviced successfully and 'rc' indicate number of + * bytes written to payload. + */ +struct pdsm_cmd_desc { + u32 size_in; + u32 size_out; + int (*service)(struct papr_scm_priv *dimm, + union nd_pdsm_payload *payload); +}; + +/* Holds all supported PDSMs' command descriptors */ +static const struct pdsm_cmd_desc __pdsm_cmd_descriptors[] = { + [PAPR_PDSM_MIN] = { + .size_in = 0, + .size_out = 0, + .service = NULL, + }, + /* New PDSM command descriptors to be added below */ + + [PAPR_PDSM_HEALTH] = { + .size_in = 0, + .size_out = sizeof(struct nd_papr_pdsm_health), + .service = papr_pdsm_health, + }, + /* Empty */ + [PAPR_PDSM_MAX] = { + .size_in = 0, + .size_out = 0, + .service = NULL, + }, +}; + +/* Given a valid pdsm cmd return its command descriptor else return NULL */ +static inline const struct pdsm_cmd_desc *pdsm_cmd_desc(enum papr_pdsm cmd) +{ + if (cmd >= 0 || cmd < ARRAY_SIZE(__pdsm_cmd_descriptors)) + return &__pdsm_cmd_descriptors[cmd]; + + return NULL; +} + +/* + * For a given pdsm request call an appropriate service function. + * Returns errors if any while handling the pdsm command package. 
+ */ +static int papr_scm_service_pdsm(struct papr_scm_priv *p, + struct nd_cmd_pkg *pkg) +{ + /* Get the PDSM header and PDSM command */ + struct nd_pkg_pdsm *pdsm_pkg = (struct nd_pkg_pdsm *)pkg->nd_payload; + enum papr_pdsm pdsm = (enum papr_pdsm)pkg->nd_command; + const struct pdsm_cmd_desc *pdsc; + int rc; + + /* Fetch corresponding pdsm descriptor for validation and servicing */ + pdsc = pdsm_cmd_desc(pdsm); + + /* Validate pdsm descriptor */ + /* Ensure that reserved fields are 0 */ + if (pdsm_pkg->reserved[0] || pdsm_pkg->reserved[1]) { + dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid reserved field\n", + pdsm); + return -EINVAL; + } + + /* If pdsm expects some input, then ensure that the size_in matches */ + if (pdsc->size_in && + pkg->nd_size_in != (pdsc->size_in + ND_PDSM_HDR_SIZE)) { + dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_in=%d\n", + pdsm, pkg->nd_size_in); + return -EINVAL; + } + + /* If pdsm wants to return data, then ensure that size_out matches */ + if (pdsc->size_out && + pkg->nd_size_out != (pdsc->size_out + ND_PDSM_HDR_SIZE)) { + dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_out=%d\n", + pdsm, pkg->nd_size_out); + return -EINVAL; + } + + /* Service the pdsm */ + if (pdsc->service) { + dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Servicing..\n", pdsm); + + rc = pdsc->service(p, &pdsm_pkg->payload); + + if (rc < 0) { + /* error encountered while servicing pdsm */ + pdsm_pkg->cmd_status = rc; + pkg->nd_fw_size = ND_PDSM_HDR_SIZE; + } else { + /* pdsm serviced and 'rc' bytes written to payload */ + pdsm_pkg->cmd_status = 0; + pkg->nd_fw_size = ND_PDSM_HDR_SIZE + rc; + } + } else { + dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Unsupported PDSM request\n", + pdsm); + pdsm_pkg->cmd_status = -ENOENT; + pkg->nd_fw_size = ND_PDSM_HDR_SIZE; + } + + return pdsm_pkg->cmd_status; +} + static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) { struct nd_cmd_get_config_size *get_size_hdr; + struct nd_cmd_pkg *call_pkg = NULL; struct papr_scm_priv *p; + int rc; - /* Only dimm-specific calls are supported atm */ - if (!nvdimm) - return -EINVAL; + rc = is_cmd_valid(nvdimm, cmd, buf, buf_len); + if (rc) { + pr_debug("Invalid cmd=0x%x. 
Err=%d\n", cmd, rc); + return rc; + } + + /* Use a local variable in case cmd_rc pointer is NULL */ + if (!cmd_rc) + cmd_rc = &rc; p = nvdimm_provider_data(nvdimm); @@ -277,7 +616,13 @@ static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, *cmd_rc = papr_scm_meta_set(p, buf); break; + case ND_CMD_CALL: + call_pkg = (struct nd_cmd_pkg *)buf; + *cmd_rc = papr_scm_service_pdsm(p, call_pkg); + break; + default: + dev_dbg(&p->pdev->dev, "Unknown command = %d\n", cmd); return -EINVAL; } @@ -286,6 +631,64 @@ static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, return 0; } +static ssize_t flags_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm *dimm = to_nvdimm(dev); + struct papr_scm_priv *p = nvdimm_provider_data(dimm); + struct seq_buf s; + u64 health; + int rc; + + rc = drc_pmem_query_health(p); + if (rc) + return rc; + + /* Copy health_bitmap locally, check masks & update out buffer */ + health = READ_ONCE(p->health_bitmap); + + seq_buf_init(&s, buf, PAGE_SIZE); + if (health & PAPR_PMEM_UNARMED_MASK) + seq_buf_printf(&s, "not_armed "); + + if (health & PAPR_PMEM_BAD_SHUTDOWN_MASK) + seq_buf_printf(&s, "flush_fail "); + + if (health & PAPR_PMEM_BAD_RESTORE_MASK) + seq_buf_printf(&s, "restore_fail "); + + if (health & PAPR_PMEM_ENCRYPTED) + seq_buf_printf(&s, "encrypted "); + + if (health & PAPR_PMEM_SMART_EVENT_MASK) + seq_buf_printf(&s, "smart_notify "); + + if (health & PAPR_PMEM_SCRUBBED_AND_LOCKED) + seq_buf_printf(&s, "scrubbed locked "); + + if (seq_buf_used(&s)) + seq_buf_printf(&s, "\n"); + + return seq_buf_used(&s); +} +DEVICE_ATTR_RO(flags); + +/* papr_scm specific dimm attributes */ +static struct attribute *papr_nd_attributes[] = { + &dev_attr_flags.attr, + NULL, +}; + +static struct attribute_group papr_nd_attribute_group = { + .name = "papr", + .attrs = papr_nd_attributes, +}; + +static const struct attribute_group *papr_nd_attr_groups[] = { + &papr_nd_attribute_group, + NULL, +}; + static int papr_scm_nvdimm_init(struct papr_scm_priv *p) { struct device *dev = &p->pdev->dev; @@ -312,8 +715,8 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p) dimm_flags = 0; set_bit(NDD_LABELING, &dimm_flags); - p->nvdimm = nvdimm_create(p->bus, p, NULL, dimm_flags, - PAPR_SCM_DIMM_CMD_MASK, 0, NULL); + p->nvdimm = nvdimm_create(p->bus, p, papr_nd_attr_groups, + dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL); if (!p->nvdimm) { dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn); goto err; @@ -399,6 +802,9 @@ static int papr_scm_probe(struct platform_device *pdev) if (!p) return -ENOMEM; + /* Initialize the dimm mutex */ + mutex_init(&p->health_mutex); + /* optional DT properties */ of_property_read_u32(dn, "ibm,metadata-size", &metadata_size); diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 4a8874bc1057..040b9d01c079 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -1066,10 +1066,10 @@ int fsl_pci_mcheck_exception(struct pt_regs *regs) if (is_in_pci_mem_space(addr)) { if (user_mode(regs)) - ret = probe_user_read(&inst, (void __user *)regs->nip, - sizeof(inst)); + ret = copy_from_user_nofault(&inst, + (void __user *)regs->nip, sizeof(inst)); else - ret = probe_kernel_address((void *)regs->nip, inst); + ret = get_kernel_nofault(inst, (void *)regs->nip); if (!ret && mcheck_handle_load(regs, inst)) { regs->nip += 4; diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index d969bab4a26b..262e5bbb2776 100644 --- 
a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -179,7 +179,7 @@ " bnez %1, 0b\n" \ "1:\n" \ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ + : "rJ" ((long)__old), "rJ" (__new) \ : "memory"); \ break; \ case 8: \ @@ -224,7 +224,7 @@ RISCV_ACQUIRE_BARRIER \ "1:\n" \ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ + : "rJ" ((long)__old), "rJ" (__new) \ : "memory"); \ break; \ case 8: \ @@ -270,7 +270,7 @@ " bnez %1, 0b\n" \ "1:\n" \ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ + : "rJ" ((long)__old), "rJ" (__new) \ : "memory"); \ break; \ case 8: \ @@ -316,7 +316,7 @@ " fence rw, rw\n" \ "1:\n" \ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ + : "rJ" ((long)__old), "rJ" (__new) \ : "memory"); \ break; \ case 8: \ diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 08396614d6f4..2ff63d0cbb50 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -38,7 +38,8 @@ static int ftrace_check_current_call(unsigned long hook_pos, * Read the text we want to modify; * return must be -EFAULT on read error */ - if (probe_kernel_read(replaced, (void *)hook_pos, MCOUNT_INSN_SIZE)) + if (copy_from_kernel_nofault(replaced, (void *)hook_pos, + MCOUNT_INSN_SIZE)) return -EFAULT; /* diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c index f16ade84a11f..c3275f42d1ac 100644 --- a/arch/riscv/kernel/kgdb.c +++ b/arch/riscv/kernel/kgdb.c @@ -62,7 +62,7 @@ int get_step_address(struct pt_regs *regs, unsigned long *next_addr) unsigned int rs1_num, rs2_num; int op_code; - if (probe_kernel_address((void *)pc, op_code)) + if (get_kernel_nofault(op_code, (void *)pc)) return -EINVAL; if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) { if (is_c_jalr_insn(op_code) || is_c_jr_insn(op_code)) { @@ -146,14 +146,14 @@ int do_single_step(struct pt_regs *regs) return error; /* Store the op code in the stepped address */ - error = probe_kernel_address((void *)addr, stepped_opcode); + error = get_kernel_nofault(stepped_opcode, (void *)addr); if (error) return error; stepped_address = addr; /* Replace the op code with the break instruction */ - error = probe_kernel_write((void *)stepped_address, + error = copy_to_kernel_nofault((void *)stepped_address, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); /* Flush and return */ @@ -173,7 +173,7 @@ int do_single_step(struct pt_regs *regs) static void undo_single_step(struct pt_regs *regs) { if (stepped_opcode != 0) { - probe_kernel_write((void *)stepped_address, + copy_to_kernel_nofault((void *)stepped_address, (void *)&stepped_opcode, BREAK_INSTR_SIZE); flush_icache_range(stepped_address, stepped_address + BREAK_INSTR_SIZE); diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c index d4a64dfed342..3fe7a5296aa5 100644 --- a/arch/riscv/kernel/patch.c +++ b/arch/riscv/kernel/patch.c @@ -63,7 +63,7 @@ static int patch_insn_write(void *addr, const void *insn, size_t len) waddr = patch_map(addr, FIX_TEXT_POKE0); - ret = probe_kernel_write(waddr, insn, len); + ret = copy_to_kernel_nofault(waddr, insn, len); patch_unmap(FIX_TEXT_POKE0); @@ -76,7 +76,7 @@ NOKPROBE_SYMBOL(patch_insn_write); #else static int patch_insn_write(void *addr, const void *insn, size_t len) { - return probe_kernel_write(addr, insn, len); + return copy_to_kernel_nofault(addr, insn, len); } NOKPROBE_SYMBOL(patch_insn_write); #endif /* CONFIG_MMU */ diff --git a/arch/riscv/kernel/sys_riscv.c 
b/arch/riscv/kernel/sys_riscv.c index f3619f59d85c..12f8a7fce78b 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c @@ -8,6 +8,7 @@ #include #include #include +#include static long riscv_sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, @@ -16,6 +17,11 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len, { if (unlikely(offset & (~PAGE_MASK >> page_shift_offset))) return -EINVAL; + + if ((prot & PROT_WRITE) && (prot & PROT_EXEC)) + if (unlikely(!(prot & PROT_READ))) + return -EINVAL; + return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> (PAGE_SHIFT - page_shift_offset)); } diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index ecec1778e3a4..7d95cce5e47c 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -137,7 +137,7 @@ static inline unsigned long get_break_insn_length(unsigned long pc) { bug_insn_t insn; - if (probe_kernel_address((bug_insn_t *)pc, insn)) + if (get_kernel_nofault(insn, (bug_insn_t *)pc)) return 0; return GET_INSN_LENGTH(insn); @@ -165,7 +165,7 @@ int is_valid_bugaddr(unsigned long pc) if (pc < VMALLOC_START) return 0; - if (probe_kernel_address((bug_insn_t *)pc, insn)) + if (get_kernel_nofault(insn, (bug_insn_t *)pc)) return 0; if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) return (insn == __BUG_INSN_32); diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 38ba55b0eb9d..e4c7c2c8a02f 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -17,7 +17,7 @@ vdso-syms += flush_icache obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o ifneq ($(c-gettimeofday-y),) - CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y) + CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y) endif # Build rules @@ -27,6 +27,9 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) obj-y += vdso.o vdso-syms.o CPPFLAGS_vdso.lds += -P -C -U$(ARCH) +# Disable -pg to prevent insert call site +CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os + # Disable gcov profiling for VDSO code GCOV_PROFILE := n diff --git a/arch/riscv/kernel/vdso/vgettimeofday.c b/arch/riscv/kernel/vdso/vgettimeofday.c index d264943e2e47..cc0d80699c31 100644 --- a/arch/riscv/kernel/vdso/vgettimeofday.c +++ b/arch/riscv/kernel/vdso/vgettimeofday.c @@ -9,16 +9,22 @@ #include #include +extern +int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts); int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) { return __cvdso_clock_gettime(clock, ts); } +extern +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { return __cvdso_gettimeofday(tv, tz); } +extern +int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res); int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res) { return __cvdso_clock_getres(clock_id, res); diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c index ec2c70f84994..289a9a5ea5b5 100644 --- a/arch/riscv/mm/pageattr.c +++ b/arch/riscv/mm/pageattr.c @@ -151,6 +151,7 @@ int set_memory_nx(unsigned long addr, int numpages) int set_direct_map_invalid_noflush(struct page *page) { + int ret; unsigned long start = (unsigned long)page_address(page); unsigned long end = start + PAGE_SIZE; struct pageattr_masks masks = { @@ -158,11 +159,16 @@ int set_direct_map_invalid_noflush(struct page *page) .clear_mask = __pgprot(_PAGE_PRESENT) }; - 
return walk_page_range(&init_mm, start, end, &pageattr_ops, &masks); + mmap_read_lock(&init_mm); + ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks); + mmap_read_unlock(&init_mm); + + return ret; } int set_direct_map_default_noflush(struct page *page) { + int ret; unsigned long start = (unsigned long)page_address(page); unsigned long end = start + PAGE_SIZE; struct pageattr_masks masks = { @@ -170,7 +176,11 @@ int set_direct_map_default_noflush(struct page *page) .clear_mask = __pgprot(0) }; - return walk_page_range(&init_mm, start, end, &pageattr_ops, &masks); + mmap_read_lock(&init_mm); + ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks); + mmap_read_unlock(&init_mm); + + return ret; } void __kernel_map_pages(struct page *page, int numpages, int enable) diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 194824932a60..c7d7ede6300c 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -462,6 +462,7 @@ config NUMA config NODES_SHIFT int + depends on NEED_MULTIPLE_NODES default "1" config SCHED_SMT diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c index d977643fa627..e1ae23911ccd 100644 --- a/arch/s390/crypto/prng.c +++ b/arch/s390/crypto/prng.c @@ -693,7 +693,7 @@ static ssize_t prng_chunksize_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%u\n", prng_chunk_size); + return scnprintf(buf, PAGE_SIZE, "%u\n", prng_chunk_size); } static DEVICE_ATTR(chunksize, 0444, prng_chunksize_show, NULL); @@ -712,7 +712,7 @@ static ssize_t prng_counter_show(struct device *dev, counter = prng_data->prngws.byte_counter; mutex_unlock(&prng_data->mutex); - return snprintf(buf, PAGE_SIZE, "%llu\n", counter); + return scnprintf(buf, PAGE_SIZE, "%llu\n", counter); } static DEVICE_ATTR(byte_counter, 0444, prng_counter_show, NULL); @@ -721,7 +721,7 @@ static ssize_t prng_errorflag_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", prng_errorflag); + return scnprintf(buf, PAGE_SIZE, "%d\n", prng_errorflag); } static DEVICE_ATTR(errorflag, 0444, prng_errorflag_show, NULL); @@ -731,9 +731,9 @@ static ssize_t prng_mode_show(struct device *dev, char *buf) { if (prng_mode == PRNG_MODE_TDES) - return snprintf(buf, PAGE_SIZE, "TDES\n"); + return scnprintf(buf, PAGE_SIZE, "TDES\n"); else - return snprintf(buf, PAGE_SIZE, "SHA512\n"); + return scnprintf(buf, PAGE_SIZE, "SHA512\n"); } static DEVICE_ATTR(mode, 0444, prng_mode_show, NULL); @@ -756,7 +756,7 @@ static ssize_t prng_reseed_limit_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%u\n", prng_reseed_limit); + return scnprintf(buf, PAGE_SIZE, "%u\n", prng_reseed_limit); } static ssize_t prng_reseed_limit_store(struct device *dev, struct device_attribute *attr, @@ -787,7 +787,7 @@ static ssize_t prng_strength_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "256\n"); + return scnprintf(buf, PAGE_SIZE, "256\n"); } static DEVICE_ATTR(strength, 0444, prng_strength_show, NULL); diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index f073292e9fdb..d9d5de0f67ff 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -33,7 +33,17 @@ static inline void syscall_rollback(struct task_struct *task, static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { - return IS_ERR_VALUE(regs->gprs[2]) ? 
regs->gprs[2] : 0; + unsigned long error = regs->gprs[2]; +#ifdef CONFIG_COMPAT + if (test_tsk_thread_flag(task, TIF_31BIT)) { + /* + * Sign-extend the value so (int)-EFOO becomes (long)-EFOO + * and will match correctly in comparisons. + */ + error = (long)(int)error; + } +#endif + return IS_ERR_VALUE(error) ? error : 0; } static inline long syscall_get_return_value(struct task_struct *task, diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index 3bcfdeb01395..0cd085cdeb4f 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h @@ -36,6 +36,7 @@ struct vdso_data { __u32 tk_shift; /* Shift used for xtime_nsec 0x60 */ __u32 ts_dir; /* TOD steering direction 0x64 */ __u64 ts_end; /* TOD steering end 0x68 */ + __u32 hrtimer_res; /* hrtimer resolution 0x70 */ }; struct vdso_per_cpu_data { diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 165031bd3370..5d8cc1864566 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -76,6 +76,7 @@ int main(void) OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift); OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir); OFFSET(__VDSO_TS_END, vdso_data, ts_end); + OFFSET(__VDSO_CLOCK_REALTIME_RES, vdso_data, hrtimer_res); OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base); OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time); OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val); @@ -86,7 +87,6 @@ int main(void) DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE); DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE); DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID); - DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC); BLANK(); /* idle data offsets */ diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 636446003a06..263075a1af36 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -198,9 +198,10 @@ static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas) if (!areas) goto fail_malloc_areas; for (i = 0; i < nr_areas; i++) { + /* GFP_NOWARN to avoid user triggerable WARN, we handle fails */ areas[i] = kmalloc_array(pages_per_area, sizeof(debug_entry_t *), - GFP_KERNEL); + GFP_KERNEL | __GFP_NOWARN); if (!areas[i]) goto fail_malloc_areas2; for (j = 0; j < pages_per_area; j++) { diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index cd241ee66eff..078277231858 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -170,6 +170,8 @@ static noinline __init void setup_lowcore_early(void) psw_t psw; psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA; + if (IS_ENABLED(CONFIG_KASAN)) + psw.mask |= PSW_MASK_DAT; psw.addr = (unsigned long) s390_base_ext_handler; S390_lowcore.external_new_psw = psw; psw.addr = (unsigned long) s390_base_pgm_handler; diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 50ff6dd0f995..969b35b177dd 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -378,9 +378,9 @@ ENTRY(system_call) stmg %r8,%r15,__LC_SAVE_AREA_SYNC BPOFF lg %r12,__LC_CURRENT - lghi %r13,__TASK_thread lghi %r14,_PIF_SYSCALL .Lsysc_per: + lghi %r13,__TASK_thread lg %r15,__LC_KERNEL_STACK la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER @@ -401,9 +401,9 @@ ENTRY(system_call) jnz .Lsysc_nr_ok # svc 0: system call number in %r1 llgfr %r1,%r1 # clear high word in r1 + sth %r1,__PT_INT_CODE+2(%r11) cghi %r1,NR_syscalls jnl 
.Lsysc_nr_ok - sth %r1,__PT_INT_CODE+2(%r11) slag %r8,%r1,3 .Lsysc_nr_ok: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 44e01dd1e624..b388e87a08bf 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -83,7 +83,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, { struct ftrace_insn orig, new, old; - if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) + if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old))) return -EFAULT; if (addr == MCOUNT_ADDR) { /* Initial code replacement */ @@ -105,7 +105,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { struct ftrace_insn orig, new, old; - if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) + if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old))) return -EFAULT; /* Replace nop with an ftrace call. */ ftrace_generate_nop_insn(&orig); diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index ccea9a245867..90a2a17239b0 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -181,7 +181,7 @@ static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \ struct kobj_attribute *attr, \ char *page) \ { \ - return snprintf(page, PAGE_SIZE, _format, ##args); \ + return scnprintf(page, PAGE_SIZE, _format, ##args); \ } #define IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk) \ diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index ce60a459a143..3cc15c066298 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -323,6 +323,25 @@ static inline void __poke_user_per(struct task_struct *child, child->thread.per_user.end = data; } +static void fixup_int_code(struct task_struct *child, addr_t data) +{ + struct pt_regs *regs = task_pt_regs(child); + int ilc = regs->int_code >> 16; + u16 insn; + + if (ilc > 6) + return; + + if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16), + &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn)) + return; + + /* double check that tracee stopped on svc instruction */ + if ((insn >> 8) != 0xa) + return; + + regs->int_code = 0x20000 | (data & 0xffff); +} /* * Write a word to the user area of a process at location addr. This * operation does have an additional problem compared to peek_user. 
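The new fixup_int_code() above only rewrites regs->int_code when the tracee really stopped on an svc instruction, which it detects from the top byte of the halfword read back at the old PSW address. Below is a small stand-alone sketch of that decode test; the 0x0a opcode value is taken from the hunk above, while the sample encodings and the helper name are illustrative only, not part of the patch.

#include <stdio.h>
#include <stdint.h>

/*
 * On s390 "svc N" is a 2-byte instruction: opcode 0x0a followed by the
 * 8-bit immediate. fixup_int_code() applies the same test: the high byte
 * of the halfword fetched from the tracee must be 0x0a.
 */
static int is_svc_insn(uint16_t insn)
{
	return (insn >> 8) == 0x0a;
}

int main(void)
{
	uint16_t svc_insn = 0x0a04;	/* svc 4 */
	uint16_t lr_insn  = 0x1812;	/* lr %r1,%r2 - not a system call */

	printf("0x%04x -> %s\n", svc_insn, is_svc_insn(svc_insn) ? "svc" : "not svc");
	printf("0x%04x -> %s\n", lr_insn, is_svc_insn(lr_insn) ? "svc" : "not svc");
	return 0;
}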
@@ -334,7 +353,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) struct user *dummy = NULL; addr_t offset; + if (addr < (addr_t) &dummy->regs.acrs) { + struct pt_regs *regs = task_pt_regs(child); /* * psw and gprs are stored on the stack */ @@ -352,7 +373,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) /* Invalid addressing mode bits */ return -EINVAL; } - *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; + + if (test_pt_regs_flag(regs, PIF_SYSCALL) && + addr == offsetof(struct user, regs.gprs[2])) + fixup_int_code(child, data); + *(addr_t *)((addr_t) ®s->psw + addr) = data; } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { /* @@ -718,6 +743,10 @@ static int __poke_user_compat(struct task_struct *child, regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | (__u64)(tmp & PSW32_ADDR_AMODE); } else { + + if (test_pt_regs_flag(regs, PIF_SYSCALL) && + addr == offsetof(struct compat_user, regs.gprs[2])) + fixup_int_code(child, data); /* gpr 0-15 */ *(__u32*)((addr_t) ®s->psw + addr*2 + 4) = tmp; } @@ -837,40 +866,66 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) { unsigned long mask = -1UL; + long ret = -1; + + if (is_compat_task()) + mask = 0xffffffff; /* * The sysc_tracesys code in entry.S stored the system * call number to gprs[2]. */ if (test_thread_flag(TIF_SYSCALL_TRACE) && - (tracehook_report_syscall_entry(regs) || - regs->gprs[2] >= NR_syscalls)) { + tracehook_report_syscall_entry(regs)) { /* - * Tracing decided this syscall should not happen or the - * debugger stored an invalid system call number. Skip + * Tracing decided this syscall should not happen. Skip * the system call and the system call restart handling. */ - clear_pt_regs_flag(regs, PIF_SYSCALL); - return -1; + goto skip; } +#ifdef CONFIG_SECCOMP /* Do the secure computing check after ptrace. */ - if (secure_computing()) { - /* seccomp failures shouldn't expose any additional code. 
*/ - return -1; + if (unlikely(test_thread_flag(TIF_SECCOMP))) { + struct seccomp_data sd; + + if (is_compat_task()) { + sd.instruction_pointer = regs->psw.addr & 0x7fffffff; + sd.arch = AUDIT_ARCH_S390; + } else { + sd.instruction_pointer = regs->psw.addr; + sd.arch = AUDIT_ARCH_S390X; + } + + sd.nr = regs->int_code & 0xffff; + sd.args[0] = regs->orig_gpr2 & mask; + sd.args[1] = regs->gprs[3] & mask; + sd.args[2] = regs->gprs[4] & mask; + sd.args[3] = regs->gprs[5] & mask; + sd.args[4] = regs->gprs[6] & mask; + sd.args[5] = regs->gprs[7] & mask; + + if (__secure_computing(&sd) == -1) + goto skip; } +#endif /* CONFIG_SECCOMP */ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) - trace_sys_enter(regs, regs->gprs[2]); + trace_sys_enter(regs, regs->int_code & 0xffff); - if (is_compat_task()) - mask = 0xffffffff; - audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask, + audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask, regs->gprs[3] &mask, regs->gprs[4] &mask, regs->gprs[5] &mask); + if ((signed long)regs->gprs[2] >= NR_syscalls) { + regs->gprs[2] = -ENOSYS; + ret = -ENOSYS; + } return regs->gprs[2]; +skip: + clear_pt_regs_flag(regs, PIF_SYSCALL); + return ret; } asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index f9d070d016e3..b1113b519432 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -301,6 +301,7 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->tk_mult = tk->tkr_mono.mult; vdso_data->tk_shift = tk->tkr_mono.shift; + vdso_data->hrtimer_res = hrtimer_resolution; smp_wmb(); ++vdso_data->tb_update_count; } diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index 66e89b2866d7..c296e5c8dbf9 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -331,7 +331,7 @@ EXPORT_SYMBOL_GPL(arch_make_page_accessible); static ssize_t uv_query_facilities(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - return snprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n", + return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n", uv_info.inst_calls_list[0], uv_info.inst_calls_list[1], uv_info.inst_calls_list[2], @@ -344,7 +344,7 @@ static struct kobj_attribute uv_query_facilities_attr = static ssize_t uv_query_max_guest_cpus(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - return snprintf(page, PAGE_SIZE, "%d\n", + return scnprintf(page, PAGE_SIZE, "%d\n", uv_info.max_guest_cpus); } @@ -354,7 +354,7 @@ static struct kobj_attribute uv_query_max_guest_cpus_attr = static ssize_t uv_query_max_guest_vms(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - return snprintf(page, PAGE_SIZE, "%d\n", + return scnprintf(page, PAGE_SIZE, "%d\n", uv_info.max_num_sec_conf); } @@ -364,7 +364,7 @@ static struct kobj_attribute uv_query_max_guest_vms_attr = static ssize_t uv_query_max_guest_addr(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - return snprintf(page, PAGE_SIZE, "%lx\n", + return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.max_sec_stor_addr); } diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile index bec19e7e6e1c..4a66a1cb919b 100644 --- a/arch/s390/kernel/vdso64/Makefile +++ b/arch/s390/kernel/vdso64/Makefile @@ -18,8 +18,8 @@ KBUILD_AFLAGS_64 += -m64 -s KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin -KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ - -Wl,--hash-style=both +ldflags-y := 
-fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \ + --hash-style=both --build-id -T $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64) $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64) @@ -37,8 +37,8 @@ KASAN_SANITIZE := n $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so # link rule for the .so file, .lds has to be first -$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE - $(call if_changed,vdso64ld) +$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) FORCE + $(call if_changed,ld) # strip rule for the .so file $(obj)/%.so: OBJCOPYFLAGS := -S @@ -50,8 +50,6 @@ $(obj-vdso64): %.o: %.S FORCE $(call if_changed_dep,vdso64as) # actual build commands -quiet_cmd_vdso64ld = VDSO64L $@ - cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@ quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S index 081435398e0a..0c79caa32b59 100644 --- a/arch/s390/kernel/vdso64/clock_getres.S +++ b/arch/s390/kernel/vdso64/clock_getres.S @@ -17,12 +17,14 @@ .type __kernel_clock_getres,@function __kernel_clock_getres: CFI_STARTPROC - larl %r1,4f + larl %r1,3f + lg %r0,0(%r1) cghi %r2,__CLOCK_REALTIME_COARSE je 0f cghi %r2,__CLOCK_MONOTONIC_COARSE je 0f - larl %r1,3f + larl %r1,_vdso_data + llgf %r0,__VDSO_CLOCK_REALTIME_RES(%r1) cghi %r2,__CLOCK_REALTIME je 0f cghi %r2,__CLOCK_MONOTONIC @@ -36,7 +38,6 @@ __kernel_clock_getres: jz 2f 0: ltgr %r3,%r3 jz 1f /* res == NULL */ - lg %r0,0(%r1) xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */ stg %r0,8(%r3) /* store tp->tv_usec */ 1: lghi %r2,0 @@ -45,6 +46,5 @@ __kernel_clock_getres: svc 0 br %r14 CFI_ENDPROC -3: .quad __CLOCK_REALTIME_RES -4: .quad __CLOCK_COARSE_RES +3: .quad __CLOCK_COARSE_RES .size __kernel_clock_getres,.-__kernel_clock_getres diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 6a24751557f0..d53c2e2ea1fd 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -105,7 +105,7 @@ static int bad_address(void *p) { unsigned long dummy; - return probe_kernel_address((unsigned long *)p, dummy); + return get_kernel_nofault(dummy, (unsigned long *)p); } static void dump_pagetable(unsigned long asce, unsigned long address) diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 1b04270e5460..0646c5961846 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c @@ -119,7 +119,7 @@ static void ftrace_mod_code(void) * But if one were to fail, then they all should, and if one were * to succeed, then they all should. 
*/ - mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode, + mod_code_status = copy_to_kernel_nofault(mod_code_ip, mod_code_newcode, MCOUNT_INSN_SIZE); /* if we fail, then kill any new writers */ @@ -203,7 +203,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned char *old_code, */ /* read the text we want to modify */ - if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) + if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE)) return -EFAULT; /* Make sure it is what we expect it to be */ @@ -268,7 +268,7 @@ static int ftrace_mod(unsigned long ip, unsigned long old_addr, { unsigned char code[MCOUNT_INSN_SIZE]; - if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) + if (copy_from_kernel_nofault(code, (void *)ip, MCOUNT_INSN_SIZE)) return -EFAULT; if (old_addr != __raw_readl((unsigned long *)code)) diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index a33025451fcd..9c3d32b80038 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -118,7 +118,7 @@ int is_valid_bugaddr(unsigned long addr) if (addr < PAGE_OFFSET) return 0; - if (probe_kernel_address((insn_size_t *)addr, opcode)) + if (get_kernel_nofault(opcode, (insn_size_t *)addr)) return 0; if (opcode == TRAPA_BUG_OPCODE) return 1; diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c index e929c0966696..8ccd56813f68 100644 --- a/arch/um/kernel/maccess.c +++ b/arch/um/kernel/maccess.c @@ -7,7 +7,7 @@ #include #include -bool probe_kernel_read_allowed(const void *src, size_t size) +bool copy_from_kernel_nofault_allowed(const void *src, size_t size) { void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6a0cc524882d..883da0abf779 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -67,7 +67,7 @@ config X86 select ARCH_HAS_FILTER_PGPROT select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL - select ARCH_HAS_KCOV if X86_64 + select ARCH_HAS_KCOV if X86_64 && STACK_VALIDATION select ARCH_HAS_MEM_ENCRYPT select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index e821a7d7d5c4..97d37f0a34f5 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -213,7 +213,6 @@ SYM_FUNC_START(startup_32) * We place all of the values on our mini stack so lret can * used to perform that far jump. */ - pushl $__KERNEL_CS leal startup_64(%ebp), %eax #ifdef CONFIG_EFI_MIXED movl efi32_boot_args(%ebp), %edi @@ -224,11 +223,20 @@ SYM_FUNC_START(startup_32) movl efi32_boot_args+8(%ebp), %edx // saved bootparams pointer cmpl $0, %edx jnz 1f + /* + * efi_pe_entry uses MS calling convention, which requires 32 bytes of + * shadow space on the stack even if all arguments are passed in + * registers. We also need an additional 8 bytes for the space that + * would be occupied by the return address, and this also results in + * the correct stack alignment for entry. 
+ */ + subl $40, %esp leal efi_pe_entry(%ebp), %eax movl %edi, %ecx // MS calling convention movl %esi, %edx 1: #endif + pushl $__KERNEL_CS pushl %eax /* Enter paged protected Mode, activating Long Mode */ @@ -784,6 +792,7 @@ SYM_DATA_LOCAL(boot_heap, .fill BOOT_HEAP_SIZE, 1, 0) SYM_DATA_START_LOCAL(boot_stack) .fill BOOT_STACK_SIZE, 1, 0 + .balign 16 SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end) /* diff --git a/arch/x86/events/Makefile b/arch/x86/events/Makefile index 12c42eba77ec..9933c0e8e97a 100644 --- a/arch/x86/events/Makefile +++ b/arch/x86/events/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-y += core.o probe.o -obj-$(PERF_EVENTS_INTEL_RAPL) += rapl.o +obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += rapl.o obj-y += amd/ obj-$(CONFIG_X86_LOCAL_APIC) += msr.o obj-$(CONFIG_CPU_SUP_INTEL) += intel/ diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index a54c6a401581..2bdc72e6890e 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -375,7 +375,9 @@ void __init hyperv_init(void) guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0); wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id); - hv_hypercall_pg = vmalloc_exec(PAGE_SIZE); + hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, + VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX, + VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, __func__); if (hv_hypercall_pg == NULL) { wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); goto remove_cpuhp_state; diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 35460fef39b8..0367efdc5b7a 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -201,12 +201,8 @@ arch_test_and_change_bit(long nr, volatile unsigned long *addr) return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr); } -static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) +static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) { - /* - * Because this is a plain access, we need to disable KCSAN here to - * avoid double instrumentation via instrumented bitops. - */ return ((1UL << (nr & (BITS_PER_LONG-1))) & (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; } diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index fb34ff641e0a..028189575560 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -75,6 +75,12 @@ do { \ unreachable(); \ } while (0) +/* + * This instrumentation_begin() is strictly speaking incorrect; but it + * suppresses the complaints from WARN()s in noinstr code. If such a WARN() + * were to trigger, we'd rather wreck the machine in an attempt to get the + * message out than not know about it. 
+ */ #define __WARN_FLAGS(flags) \ do { \ instrumentation_begin(); \ diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index dd17c2da1af5..da78ccbd493b 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -58,4 +58,9 @@ static inline bool handle_guest_split_lock(unsigned long ip) return false; } #endif +#ifdef CONFIG_IA32_FEAT_CTL +void init_ia32_feat_ctl(struct cpuinfo_x86 *c); +#else +static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {} +#endif #endif /* _ASM_X86_CPU_H */ diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h index 6722ffcef2e6..3afa990d756b 100644 --- a/arch/x86/include/asm/cpumask.h +++ b/arch/x86/include/asm/cpumask.h @@ -11,5 +11,23 @@ extern cpumask_var_t cpu_sibling_setup_mask; extern void setup_cpu_local_masks(void); +/* + * NMI and MCE exceptions need cpu_is_offline() _really_ early, + * provide an arch_ special for them to avoid instrumentation. + */ +#if NR_CPUS > 1 +static __always_inline bool arch_cpu_online(int cpu) +{ + return arch_test_bit(cpu, cpumask_bits(cpu_online_mask)); +} +#else +static __always_inline bool arch_cpu_online(int cpu) +{ + return cpu == 0; +} +#endif + +#define arch_cpu_is_offline(cpu) unlikely(!arch_cpu_online(cpu)) + #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_CPUMASK_H */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f8998e97457f..be5363b21540 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -943,7 +943,7 @@ struct kvm_arch { atomic_t vapics_in_nmi_mode; struct mutex apic_map_lock; struct kvm_apic_map *apic_map; - bool apic_map_dirty; + atomic_t apic_map_dirty; bool apic_access_page_done; unsigned long apicv_inhibit_reasons; @@ -1220,7 +1220,7 @@ struct kvm_x86_ops { void (*enable_log_dirty_pt_masked)(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t offset, unsigned long mask); - int (*write_log_dirty)(struct kvm_vcpu *vcpu); + int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa); /* pmu operations of sub-arch */ const struct kvm_pmu_ops *pmu_ops; diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index 73d997aa2966..e039a933aca3 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -25,8 +25,6 @@ #define TPAUSE_C01_STATE 1 #define TPAUSE_C02_STATE 0 -u32 get_umwait_control_msr(void); - static inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) { diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 2da1f95b88d7..816b31c68550 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -194,6 +194,7 @@ enum page_cache_mode { #define _PAGE_TABLE_NOENC (__PP|__RW|_USR|___A| 0|___D| 0| 0) #define _PAGE_TABLE (__PP|__RW|_USR|___A| 0|___D| 0| 0| _ENC) #define __PAGE_KERNEL_RO (__PP| 0| 0|___A|__NX|___D| 0|___G) +#define __PAGE_KERNEL_ROX (__PP| 0| 0|___A| 0|___D| 0|___G) #define __PAGE_KERNEL_NOCACHE (__PP|__RW| 0|___A|__NX|___D| 0|___G| __NC) #define __PAGE_KERNEL_VVAR (__PP| 0|_USR|___A|__NX|___D| 0|___G) #define __PAGE_KERNEL_LARGE (__PP|__RW| 0|___A|__NX|___D|_PSE|___G) @@ -219,6 +220,7 @@ enum page_cache_mode { #define PAGE_KERNEL_RO __pgprot_mask(__PAGE_KERNEL_RO | _ENC) #define PAGE_KERNEL_EXEC __pgprot_mask(__PAGE_KERNEL_EXEC | _ENC) #define PAGE_KERNEL_EXEC_NOENC __pgprot_mask(__PAGE_KERNEL_EXEC | 0) +#define PAGE_KERNEL_ROX __pgprot_mask(__PAGE_KERNEL_ROX | _ENC) #define PAGE_KERNEL_NOCACHE 
__pgprot_mask(__PAGE_KERNEL_NOCACHE | _ENC) #define PAGE_KERNEL_LARGE __pgprot_mask(__PAGE_KERNEL_LARGE | _ENC) #define PAGE_KERNEL_LARGE_EXEC __pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 42cd333616c4..03b7c4ca425a 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -370,7 +370,7 @@ struct x86_hw_tss { #define IO_BITMAP_OFFSET_INVALID (__KERNEL_TSS_LIMIT + 1) struct entry_stack { - unsigned long words[64]; + char stack[PAGE_SIZE]; }; struct entry_stack_page { diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index ebedeab48704..255b2dde2c1b 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -278,7 +278,7 @@ static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs } /* To avoid include hell, we can't include uaccess.h */ -extern long probe_kernel_read(void *dst, const void *src, size_t size); +extern long copy_from_kernel_nofault(void *dst, const void *src, size_t size); /** * regs_get_kernel_stack_nth() - get Nth entry of the stack @@ -298,7 +298,7 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, addr = regs_get_kernel_stack_nth_addr(regs, n); if (addr) { - ret = probe_kernel_read(&val, addr, sizeof(val)); + ret = copy_from_kernel_nofault(&val, addr, sizeof(val)); if (!ret) return val; } diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 426792565d86..c5cf336e5077 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -3,6 +3,7 @@ #include #include +#include #include #include #include diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 043d93cdcaad..95c090a45b4b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -347,6 +347,9 @@ static __always_inline void setup_umip(struct cpuinfo_x86 *c) cr4_clear_bits(X86_CR4_UMIP); } +/* These bits should not change their value after CPU init is finished. */ +static const unsigned long cr4_pinned_mask = + X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE; static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning); static unsigned long cr4_pinned_bits __ro_after_init; @@ -371,20 +374,20 @@ EXPORT_SYMBOL(native_write_cr0); void native_write_cr4(unsigned long val) { - unsigned long bits_missing = 0; + unsigned long bits_changed = 0; set_register: asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits)); if (static_branch_likely(&cr_pinning)) { - if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) { - bits_missing = ~val & cr4_pinned_bits; - val |= bits_missing; + if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) { + bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits; + val = (val & ~cr4_pinned_mask) | cr4_pinned_bits; goto set_register; } - /* Warn after we've set the missing bits. */ - WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n", - bits_missing); + /* Warn after we've corrected the changed bits. 
*/ + WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n", + bits_changed); } } #if IS_MODULE(CONFIG_LKDTM) @@ -419,7 +422,7 @@ void cr4_init(void) if (boot_cpu_has(X86_FEATURE_PCID)) cr4 |= X86_CR4_PCIDE; if (static_branch_likely(&cr_pinning)) - cr4 |= cr4_pinned_bits; + cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits; __write_cr4(cr4); @@ -434,10 +437,7 @@ void cr4_init(void) */ static void __init setup_cr_pinning(void) { - unsigned long mask; - - mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP); - cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask; + cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask; static_key_enable(&cr_pinning.key); } diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index fb538fccd24c..9d033693519a 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -81,8 +81,4 @@ extern void update_srbds_msr(void); extern u64 x86_read_arch_cap_msr(void); -#ifdef CONFIG_IA32_FEAT_CTL -void init_ia32_feat_ctl(struct cpuinfo_x86 *c); -#endif - #endif /* ARCH_X86_CPU_H */ diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index ce9120c4f740..fbe89a92ff36 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -1083,7 +1083,7 @@ static noinstr bool mce_check_crashing_cpu(void) { unsigned int cpu = smp_processor_id(); - if (cpu_is_offline(cpu) || + if (arch_cpu_is_offline(cpu) || (crashing_cpu != -1 && crashing_cpu != cpu)) { u64 mcgstatus; diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 12f967c6b603..6a9df71c1b9e 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -981,10 +981,10 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c) c->x86_cache_max_rmid = ecx; c->x86_cache_occ_scale = ebx; - if (c->x86_vendor == X86_VENDOR_INTEL) - c->x86_cache_mbm_width_offset = eax & 0xff; - else - c->x86_cache_mbm_width_offset = -1; + c->x86_cache_mbm_width_offset = eax & 0xff; + + if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset) + c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD; } } diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index f20a47d120b1..5ffa32256b3b 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -37,6 +37,7 @@ #define MBA_IS_LINEAR 0x4 #define MBA_MAX_MBPS U32_MAX #define MAX_MBA_BW_AMD 0x800 +#define MBM_CNTR_WIDTH_OFFSET_AMD 20 #define RMID_VAL_ERROR BIT_ULL(63) #define RMID_VAL_UNAVAIL BIT_ULL(62) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 23b4b61319d3..3f844f14fc0a 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1117,6 +1117,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d, _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL); if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) { _r_cdp = NULL; + _d_cdp = NULL; ret = -EINVAL; } diff --git a/arch/x86/kernel/cpu/umwait.c b/arch/x86/kernel/cpu/umwait.c index 300e3fd5ade3..ec8064c0ae03 100644 --- a/arch/x86/kernel/cpu/umwait.c +++ b/arch/x86/kernel/cpu/umwait.c @@ -18,12 +18,6 @@ */ static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE); -u32 get_umwait_control_msr(void) -{ - return umwait_control_cached; -} -EXPORT_SYMBOL_GPL(get_umwait_control_msr); - /* * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by * hardware or BIOS before kernel boot. 
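The native_write_cr4() rework above no longer just ORs missing bits back in; it restores the complete pinned bit pattern whenever anything inside cr4_pinned_mask differs. The stand-alone sketch below replays that masking arithmetic; the bits_changed and val expressions mirror the hunk above, while the numeric values are made-up stand-ins rather than the real CR4 bit layout.

#include <stdio.h>

int main(void)
{
	/* Illustrative stand-ins, not the actual CR4 bit positions */
	unsigned long cr4_pinned_mask = 0x00300000UL;
	unsigned long cr4_pinned_bits = 0x00300000UL;	/* captured once at boot */
	unsigned long val = 0x00000020UL;	/* attempted write with pinned bits cleared */
	unsigned long bits_changed;

	/* Same test the patched native_write_cr4() performs */
	bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
	if (bits_changed) {
		/* Keep the caller's unpinned bits, force the pinned ones back */
		val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
		printf("pinned CR4 bits changed: 0x%lx, rewritten value: 0x%lx\n",
		       bits_changed, val);
	}
	return 0;
}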
diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c index df1358ba622b..05fa4ef63490 100644 --- a/arch/x86/kernel/cpu/zhaoxin.c +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -2,6 +2,7 @@ #include #include +#include #include #include "cpu.h" diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 456511b2284e..b037cfa7c0c5 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -106,7 +106,7 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl) bad_ip = user_mode(regs) && __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX); - if (bad_ip || probe_kernel_read(opcodes, (u8 *)prologue, + if (bad_ip || copy_from_kernel_nofault(opcodes, (u8 *)prologue, OPCODE_BUFSIZE)) { printk("%sCode: Bad RIP value.\n", loglvl); } else { diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index c84d28e90a58..51504566b3a6 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -86,7 +86,7 @@ static int ftrace_verify_code(unsigned long ip, const char *old_code) * sure what we read is what we expected it to be before modifying it. */ /* read the text we want to modify */ - if (probe_kernel_read(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) { + if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) { WARN_ON(1); return -EFAULT; } @@ -355,7 +355,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE); /* Copy ftrace_caller onto the trampoline memory */ - ret = probe_kernel_read(trampoline, (void *)start_offset, size); + ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size); if (WARN_ON(ret < 0)) goto fail; @@ -363,13 +363,13 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) /* The trampoline ends with ret(q) */ retq = (unsigned long)ftrace_stub; - ret = probe_kernel_read(ip, (void *)retq, RET_SIZE); + ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE); if (WARN_ON(ret < 0)) goto fail; if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) { ip = trampoline + (ftrace_regs_caller_ret - ftrace_regs_caller); - ret = probe_kernel_read(ip, (void *)retq, RET_SIZE); + ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE); if (WARN_ON(ret < 0)) goto fail; } @@ -506,7 +506,7 @@ static void *addr_from_call(void *ptr) union text_poke_insn call; int ret; - ret = probe_kernel_read(&call, ptr, CALL_INSN_SIZE); + ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE); if (WARN_ON_ONCE(ret < 0)) return NULL; diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index c44fe7d8d9a4..68acd30c6b87 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -732,11 +732,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) int err; bpt->type = BP_BREAKPOINT; - err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, + err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err) return err; - err = probe_kernel_write((char *)bpt->bpt_addr, + err = copy_to_kernel_nofault((char *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); if (!err) return err; @@ -768,7 +768,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) return 0; knl_write: - return probe_kernel_write((char *)bpt->bpt_addr, + return copy_to_kernel_nofault((char *)bpt->bpt_addr, (char *)bpt->saved_instr, BREAK_INSTR_SIZE); } diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 3bafe1bd4dc7..ada39ddbc922 100644 --- a/arch/x86/kernel/kprobes/core.c 
+++ b/arch/x86/kernel/kprobes/core.c @@ -243,7 +243,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) * Fortunately, we know that the original code is the ideal 5-byte * long NOP. */ - if (probe_kernel_read(buf, (void *)addr, + if (copy_from_kernel_nofault(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t))) return 0UL; @@ -346,7 +346,8 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn) return 0; /* This can access kernel text if given address is not recovered */ - if (probe_kernel_read(dest, (void *)recovered_insn, MAX_INSN_SIZE)) + if (copy_from_kernel_nofault(dest, (void *)recovered_insn, + MAX_INSN_SIZE)) return 0; kernel_insn_init(insn, dest, MAX_INSN_SIZE); @@ -753,16 +754,11 @@ asm( NOKPROBE_SYMBOL(kretprobe_trampoline); STACK_FRAME_NON_STANDARD(kretprobe_trampoline); -static struct kprobe kretprobe_kprobe = { - .addr = (void *)kretprobe_trampoline, -}; - /* * Called from kretprobe_trampoline */ __used __visible void *trampoline_handler(struct pt_regs *regs) { - struct kprobe_ctlblk *kcb; struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; struct hlist_node *tmp; @@ -772,16 +768,12 @@ __used __visible void *trampoline_handler(struct pt_regs *regs) void *frame_pointer; bool skipped = false; - preempt_disable(); - /* * Set a dummy kprobe for avoiding kretprobe recursion. * Since kretprobe never run in kprobe handler, kprobe must not * be running at this point. */ - kcb = get_kprobe_ctlblk(); - __this_cpu_write(current_kprobe, &kretprobe_kprobe); - kcb->kprobe_status = KPROBE_HIT_ACTIVE; + kprobe_busy_begin(); INIT_HLIST_HEAD(&empty_rp); kretprobe_hash_lock(current, &head, &flags); @@ -857,7 +849,7 @@ __used __visible void *trampoline_handler(struct pt_regs *regs) __this_cpu_write(current_kprobe, &ri->rp->kp); ri->ret_addr = correct_ret_addr; ri->rp->handler(ri, regs); - __this_cpu_write(current_kprobe, &kretprobe_kprobe); + __this_cpu_write(current_kprobe, &kprobe_busy); } recycle_rp_inst(ri, &empty_rp); @@ -873,8 +865,7 @@ __used __visible void *trampoline_handler(struct pt_regs *regs) kretprobe_hash_unlock(current, &flags); - __this_cpu_write(current_kprobe, NULL); - preempt_enable(); + kprobe_busy_end(); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 321c19950285..7af4c61dde52 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -56,7 +56,7 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) * overwritten by jump destination address. In this case, original * bytes must be recovered from op->optinsn.copied_insn buffer. 
*/ - if (probe_kernel_read(buf, (void *)addr, + if (copy_from_kernel_nofault(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t))) return 0UL; diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 2de365f15684..d7c5e44b26f7 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -478,7 +478,7 @@ static DEFINE_PER_CPU(unsigned long, nmi_dr7); DEFINE_IDTENTRY_RAW(exc_nmi) { - if (IS_ENABLED(CONFIG_SMP) && cpu_is_offline(smp_processor_id())) + if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) return; if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { diff --git a/arch/x86/kernel/probe_roms.c b/arch/x86/kernel/probe_roms.c index ee0286390a4c..9e1def3744f2 100644 --- a/arch/x86/kernel/probe_roms.c +++ b/arch/x86/kernel/probe_roms.c @@ -94,12 +94,12 @@ static bool match_id(struct pci_dev *pdev, unsigned short vendor, unsigned short } static bool probe_list(struct pci_dev *pdev, unsigned short vendor, - const unsigned char *rom_list) + const void *rom_list) { unsigned short device; do { - if (probe_kernel_address(rom_list, device) != 0) + if (get_kernel_nofault(device, rom_list) != 0) device = 0; if (device && match_id(pdev, vendor, device)) @@ -119,19 +119,19 @@ static struct resource *find_oprom(struct pci_dev *pdev) for (i = 0; i < ARRAY_SIZE(adapter_rom_resources); i++) { struct resource *res = &adapter_rom_resources[i]; unsigned short offset, vendor, device, list, rev; - const unsigned char *rom; + const void *rom; if (res->end == 0) break; rom = isa_bus_to_virt(res->start); - if (probe_kernel_address(rom + 0x18, offset) != 0) + if (get_kernel_nofault(offset, rom + 0x18) != 0) continue; - if (probe_kernel_address(rom + offset + 0x4, vendor) != 0) + if (get_kernel_nofault(vendor, rom + offset + 0x4) != 0) continue; - if (probe_kernel_address(rom + offset + 0x6, device) != 0) + if (get_kernel_nofault(device, rom + offset + 0x6) != 0) continue; if (match_id(pdev, vendor, device)) { @@ -139,8 +139,8 @@ static struct resource *find_oprom(struct pci_dev *pdev) break; } - if (probe_kernel_address(rom + offset + 0x8, list) == 0 && - probe_kernel_address(rom + offset + 0xc, rev) == 0 && + if (get_kernel_nofault(list, rom + offset + 0x8) == 0 && + get_kernel_nofault(rev, rom + offset + 0xc) == 0 && rev >= 3 && list && probe_list(pdev, vendor, rom + offset + list)) { oprom = res; @@ -183,14 +183,14 @@ static int __init romsignature(const unsigned char *rom) const unsigned short * const ptr = (const unsigned short *)rom; unsigned short sig; - return probe_kernel_address(ptr, sig) == 0 && sig == ROMSIGNATURE; + return get_kernel_nofault(sig, ptr) == 0 && sig == ROMSIGNATURE; } static int __init romchecksum(const unsigned char *rom, unsigned long length) { unsigned char sum, c; - for (sum = 0; length && probe_kernel_address(rom++, c) == 0; length--) + for (sum = 0; length && get_kernel_nofault(c, rom++) == 0; length--) sum += c; return !length && !sum; } @@ -211,7 +211,7 @@ void __init probe_roms(void) video_rom_resource.start = start; - if (probe_kernel_address(rom + 2, c) != 0) + if (get_kernel_nofault(c, rom + 2) != 0) continue; /* 0 < length <= 0x7f * 512, historically */ @@ -249,7 +249,7 @@ void __init probe_roms(void) if (!romsignature(rom)) continue; - if (probe_kernel_address(rom + 2, c) != 0) + if (get_kernel_nofault(c, rom + 2) != 0) continue; /* 0 < length <= 0x7f * 512, historically */ diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index af75109485c2..f58679e487f6 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c 
@@ -84,17 +84,16 @@ static inline void cond_local_irq_disable(struct pt_regs *regs) local_irq_disable(); } -int is_valid_bugaddr(unsigned long addr) +__always_inline int is_valid_bugaddr(unsigned long addr) { - unsigned short ud; - if (addr < TASK_SIZE_MAX) return 0; - if (probe_kernel_address((unsigned short *)addr, ud)) - return 0; - - return ud == INSN_UD0 || ud == INSN_UD2; + /* + * We got #UD, if the text isn't readable we'd have gotten + * a different exception. + */ + return *(unsigned short *)addr == INSN_UD2; } static nokprobe_inline int @@ -216,40 +215,45 @@ static inline void handle_invalid_op(struct pt_regs *regs) ILL_ILLOPN, error_get_trap_addr(regs)); } +static noinstr bool handle_bug(struct pt_regs *regs) +{ + bool handled = false; + + if (!is_valid_bugaddr(regs->ip)) + return handled; + + /* + * All lies, just get the WARN/BUG out. + */ + instrumentation_begin(); + /* + * Since we're emulating a CALL with exceptions, restore the interrupt + * state to what it was at the exception site. + */ + if (regs->flags & X86_EFLAGS_IF) + raw_local_irq_enable(); + if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) { + regs->ip += LEN_UD2; + handled = true; + } + if (regs->flags & X86_EFLAGS_IF) + raw_local_irq_disable(); + instrumentation_end(); + + return handled; +} + DEFINE_IDTENTRY_RAW(exc_invalid_op) { bool rcu_exit; /* - * Handle BUG/WARN like NMIs instead of like normal idtentries: - * if we bugged/warned in a bad RCU context, for example, the last - * thing we want is to BUG/WARN again in the idtentry code, ad - * infinitum. + * We use UD2 as a short encoding for 'CALL __WARN', as such + * handle it before exception entry to avoid recursive WARN + * in case exception entry is the one triggering WARNs. */ - if (!user_mode(regs) && is_valid_bugaddr(regs->ip)) { - enum bug_trap_type type; - - nmi_enter(); - instrumentation_begin(); - trace_hardirqs_off_finish(); - type = report_bug(regs->ip, regs); - if (regs->flags & X86_EFLAGS_IF) - trace_hardirqs_on_prepare(); - instrumentation_end(); - nmi_exit(); - - if (type == BUG_TRAP_TYPE_WARN) { - /* Skip the ud2. */ - regs->ip += LEN_UD2; - return; - } - - /* - * Else, if this was a BUG and report_bug returns or if this - * was just a normal #UD, we want to continue onward and - * crash. - */ - } + if (!user_mode(regs) && handle_bug(regs)) + return; rcu_exit = idtentry_enter_cond_rcu(regs); instrumentation_begin(); @@ -488,7 +492,8 @@ static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs, u8 insn_buf[MAX_INSN_SIZE]; struct insn insn; - if (probe_kernel_read(insn_buf, (void *)regs->ip, MAX_INSN_SIZE)) + if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip, + MAX_INSN_SIZE)) return GP_NO_HINT; kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE); @@ -690,13 +695,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) (struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1; /* Copy the IRET target to the temporary storage. */ - memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8); + __memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8); /* Copy the remainder of the stack from the current stack. 
*/ - memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip)); + __memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip)); /* Update the entry stack */ - memcpy(new_stack, &tmp, sizeof(tmp)); + __memcpy(new_stack, &tmp, sizeof(tmp)); BUG_ON(!user_mode(&new_stack->regs)); return new_stack; diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 34a7e0533dad..5bf72fc86a8e 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -169,6 +169,18 @@ static void kvm_apic_map_free(struct rcu_head *rcu) kvfree(map); } +/* + * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock. + * + * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with + * apic_map_lock_held. + */ +enum { + CLEAN, + UPDATE_IN_PROGRESS, + DIRTY +}; + void kvm_recalculate_apic_map(struct kvm *kvm) { struct kvm_apic_map *new, *old = NULL; @@ -176,17 +188,17 @@ void kvm_recalculate_apic_map(struct kvm *kvm) int i; u32 max_id = 255; /* enough space for any xAPIC ID */ - if (!kvm->arch.apic_map_dirty) { - /* - * Read kvm->arch.apic_map_dirty before - * kvm->arch.apic_map - */ - smp_rmb(); + /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */ + if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN) return; - } mutex_lock(&kvm->arch.apic_map_lock); - if (!kvm->arch.apic_map_dirty) { + /* + * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map + * (if clean) or the APIC registers (if dirty). + */ + if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty, + DIRTY, UPDATE_IN_PROGRESS) == CLEAN) { /* Someone else has updated the map. */ mutex_unlock(&kvm->arch.apic_map_lock); return; @@ -256,11 +268,11 @@ void kvm_recalculate_apic_map(struct kvm *kvm) lockdep_is_held(&kvm->arch.apic_map_lock)); rcu_assign_pointer(kvm->arch.apic_map, new); /* - * Write kvm->arch.apic_map before - * clearing apic->apic_map_dirty + * Write kvm->arch.apic_map before clearing apic->apic_map_dirty. + * If another update has come in, leave it DIRTY. 
*/ - smp_wmb(); - kvm->arch.apic_map_dirty = false; + atomic_cmpxchg_release(&kvm->arch.apic_map_dirty, + UPDATE_IN_PROGRESS, CLEAN); mutex_unlock(&kvm->arch.apic_map_lock); if (old) @@ -282,20 +294,20 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) else static_key_slow_inc(&apic_sw_disabled.key); - apic->vcpu->kvm->arch.apic_map_dirty = true; + atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); } } static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id) { kvm_lapic_set_reg(apic, APIC_ID, id << 24); - apic->vcpu->kvm->arch.apic_map_dirty = true; + atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); } static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id) { kvm_lapic_set_reg(apic, APIC_LDR, id); - apic->vcpu->kvm->arch.apic_map_dirty = true; + atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); } static inline u32 kvm_apic_calc_x2apic_ldr(u32 id) @@ -311,7 +323,7 @@ static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id) kvm_lapic_set_reg(apic, APIC_ID, id); kvm_lapic_set_reg(apic, APIC_LDR, ldr); - apic->vcpu->kvm->arch.apic_map_dirty = true; + atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); } static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type) @@ -1976,7 +1988,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) case APIC_DFR: if (!apic_x2apic_mode(apic)) { kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF); - apic->vcpu->kvm->arch.apic_map_dirty = true; + atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); } else ret = 1; break; @@ -2232,7 +2244,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) static_key_slow_dec_deferred(&apic_hw_disabled); } else { static_key_slow_inc(&apic_hw_disabled.key); - vcpu->kvm->arch.apic_map_dirty = true; + atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); } } @@ -2273,7 +2285,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) if (!apic) return; - vcpu->kvm->arch.apic_map_dirty = false; /* Stop the timer in case it's a reset to an active apic */ hrtimer_cancel(&apic->lapic_timer.timer); @@ -2567,6 +2578,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) } memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s)); + atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); kvm_recalculate_apic_map(vcpu->kvm); kvm_apic_set_version(vcpu); diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 0ad06bfe2c2c..444bb9c54548 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -222,7 +222,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn); -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu); +int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa); int kvm_mmu_post_init_vm(struct kvm *kvm); void kvm_mmu_pre_destroy_vm(struct kvm *kvm); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index fdd05c233308..76817d13c86e 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1745,10 +1745,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, * Emulate arch specific page modification logging for the * nested hypervisor */ -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu) +int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa) { if (kvm_x86_ops.write_log_dirty) 
- return kvm_x86_ops.write_log_dirty(vcpu); + return kvm_x86_ops.write_log_dirty(vcpu, l2_gpa); return 0; } diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index a6d484ea110b..bd70ece1ef8b 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -235,7 +235,7 @@ static inline unsigned FNAME(gpte_access)(u64 gpte) static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, struct guest_walker *walker, - int write_fault) + gpa_t addr, int write_fault) { unsigned level, index; pt_element_t pte, orig_pte; @@ -260,7 +260,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, !(pte & PT_GUEST_DIRTY_MASK)) { trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); #if PTTYPE == PTTYPE_EPT - if (kvm_arch_write_log_dirty(vcpu)) + if (kvm_arch_write_log_dirty(vcpu, addr)) return -EINVAL; #endif pte |= PT_GUEST_DIRTY_MASK; @@ -360,7 +360,6 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, ++walker->level; do { - gfn_t real_gfn; unsigned long host_addr; pt_access = pte_access; @@ -375,7 +374,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, walker->table_gfn[walker->level - 1] = table_gfn; walker->pte_gpa[walker->level - 1] = pte_gpa; - real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn), + real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn), nested_access, &walker->fault); @@ -389,12 +388,10 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, * information to fix the exit_qualification or exit_info_1 * fields. */ - if (unlikely(real_gfn == UNMAPPED_GVA)) + if (unlikely(real_gpa == UNMAPPED_GVA)) return 0; - real_gfn = gpa_to_gfn(real_gfn); - - host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn, + host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa), &walker->pte_writable[walker->level - 1]); if (unlikely(kvm_is_error_hva(host_addr))) goto error; @@ -457,7 +454,8 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT); if (unlikely(!accessed_dirty)) { - ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault); + ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, + addr, write_fault); if (unlikely(ret < 0)) goto error; else if (ret) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 8ccfa4197d9c..c0da4dd78ac5 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3344,7 +3344,7 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs); -static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) +static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) { fastpath_t exit_fastpath; struct vcpu_svm *svm = to_svm(vcpu); diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h index 5c0ff80b85c0..7a3675fddec2 100644 --- a/arch/x86/kvm/vmx/vmcs.h +++ b/arch/x86/kvm/vmx/vmcs.h @@ -72,11 +72,24 @@ struct loaded_vmcs { struct vmcs_controls_shadow controls_shadow; }; +static inline bool is_intr_type(u32 intr_info, u32 type) +{ + const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK; + + return (intr_info & mask) == (INTR_INFO_VALID_MASK | type); +} + +static inline bool is_intr_type_n(u32 intr_info, u32 type, u8 vector) +{ + const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK | + INTR_INFO_VECTOR_MASK; + + return (intr_info & mask) == (INTR_INFO_VALID_MASK | type | vector); +} + static inline bool 
is_exception_n(u32 intr_info, u8 vector) { - return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | - INTR_INFO_VALID_MASK)) == - (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK); + return is_intr_type_n(intr_info, INTR_TYPE_HARD_EXCEPTION, vector); } static inline bool is_debug(u32 intr_info) @@ -106,28 +119,23 @@ static inline bool is_gp_fault(u32 intr_info) static inline bool is_machine_check(u32 intr_info) { - return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | - INTR_INFO_VALID_MASK)) == - (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); + return is_exception_n(intr_info, MC_VECTOR); } /* Undocumented: icebp/int1 */ static inline bool is_icebp(u32 intr_info) { - return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) - == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK); + return is_intr_type(intr_info, INTR_TYPE_PRIV_SW_EXCEPTION); } static inline bool is_nmi(u32 intr_info) { - return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) - == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK); + return is_intr_type(intr_info, INTR_TYPE_NMI_INTR); } static inline bool is_external_intr(u32 intr_info) { - return (intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK)) - == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR); + return is_intr_type(intr_info, INTR_TYPE_EXT_INTR); } enum vmcs_field_width { diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 36c771728c8c..cb22f33bf1d8 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6606,23 +6606,6 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) msrs[i].host, false); } -static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx) -{ - u32 host_umwait_control; - - if (!vmx_has_waitpkg(vmx)) - return; - - host_umwait_control = get_umwait_control_msr(); - - if (vmx->msr_ia32_umwait_control != host_umwait_control) - add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL, - vmx->msr_ia32_umwait_control, - host_umwait_control, false); - else - clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL); -} - static void vmx_update_hv_timer(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -6728,9 +6711,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) pt_guest_enter(vmx); - if (vcpu_to_pmu(vcpu)->version) - atomic_switch_perf_msrs(vmx); - atomic_switch_umwait_control_msr(vmx); + atomic_switch_perf_msrs(vmx); if (enable_preemption_timer) vmx_update_hv_timer(vcpu); @@ -7501,11 +7482,11 @@ static void vmx_flush_log_dirty(struct kvm *kvm) kvm_flush_pml_buffers(kvm); } -static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) +static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) { struct vmcs12 *vmcs12; struct vcpu_vmx *vmx = to_vmx(vcpu); - gpa_t gpa, dst; + gpa_t dst; if (is_guest_mode(vcpu)) { WARN_ON_ONCE(vmx->nested.pml_full); @@ -7524,7 +7505,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) return 1; } - gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull; + gpa &= ~0xFFFull; dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index; if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa, diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 8a83b5edc820..639798e4a6ca 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -288,8 +288,6 @@ struct vcpu_vmx { u64 current_tsc_ratio; - u32 host_pkru; - unsigned long host_debugctlmsr; /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 00c88c2f34e4..3b92db412335 100644 --- 
a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2856,7 +2856,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return kvm_mtrr_set_msr(vcpu, msr, data); case MSR_IA32_APICBASE: return kvm_set_apic_base(vcpu, msr_info); - case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: + case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: return kvm_x2apic_msr_write(vcpu, msr, data); case MSR_IA32_TSCDEADLINE: kvm_set_lapic_tscdeadline_msr(vcpu, data); @@ -3196,7 +3196,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_APICBASE: msr_info->data = kvm_get_apic_base(vcpu); break; - case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: + case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); case MSR_IA32_TSCDEADLINE: msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); @@ -4603,7 +4603,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EINVAL; user_tsc_khz = (u32)arg; - if (user_tsc_khz >= kvm_max_guest_tsc_khz) + if (kvm_has_tsc_control && + user_tsc_khz >= kvm_max_guest_tsc_khz) goto out; if (user_tsc_khz == 0) diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 56b243b14c3a..bbcc05bcefad 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -8,6 +8,8 @@ #include #include +.pushsection .noinstr.text, "ax" + /* * We build a jump to memcpy_orig by default which gets NOPped out on * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which @@ -184,6 +186,8 @@ SYM_FUNC_START_LOCAL(memcpy_orig) retq SYM_FUNC_END(memcpy_orig) +.popsection + #ifndef CONFIG_UML MCSAFE_TEST_CTL diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index fff28c6f73a2..b0dfac3d3df7 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -24,6 +24,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size) asm volatile( " testq %[size8],%[size8]\n" " jz 4f\n" + " .align 16\n" "0: movq $0,(%[dst])\n" " addq $8,%[dst]\n" " decl %%ecx ; jnz 0b\n" diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 66be9bd60307..1ead568c0101 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -99,7 +99,7 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, return !instr_lo || (instr_lo>>1) == 1; case 0x00: /* Prefetch instruction is 0x0F0D or 0x0F18 */ - if (probe_kernel_address(instr, opcode)) + if (get_kernel_nofault(opcode, instr)) return 0; *prefetch = (instr_lo == 0xF) && @@ -133,7 +133,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) while (instr < max_instr) { unsigned char opcode; - if (probe_kernel_address(instr, opcode)) + if (get_kernel_nofault(opcode, instr)) break; instr++; @@ -301,7 +301,7 @@ static int bad_address(void *p) { unsigned long dummy; - return probe_kernel_address((unsigned long *)p, dummy); + return get_kernel_nofault(dummy, (unsigned long *)p); } static void dump_pagetable(unsigned long address) @@ -442,7 +442,7 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index) return; } - if (probe_kernel_read(&desc, (void *)(gdt->address + offset), + if (copy_from_kernel_nofault(&desc, (void *)(gdt->address + offset), sizeof(struct ldttss_desc))) { pr_alert("%s: 0x%hx -- GDT entry is not readable\n", name, index); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index bda909e3e37e..8b4afad84f4a 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -737,7 +737,7 @@ static void __init test_wp_bit(void) 
__set_fixmap(FIX_WP_TEST, __pa_symbol(empty_zero_page), PAGE_KERNEL_RO); - if (probe_kernel_write((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) { + if (copy_to_kernel_nofault((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) { clear_fixmap(FIX_WP_TEST); printk(KERN_CONT "Ok.\n"); return; diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c index e1d7d7477c22..92ec176a7293 100644 --- a/arch/x86/mm/maccess.c +++ b/arch/x86/mm/maccess.c @@ -9,7 +9,7 @@ static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits) return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits); } -bool probe_kernel_read_allowed(const void *unsafe_src, size_t size) +bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) { unsigned long vaddr = (unsigned long)unsafe_src; @@ -22,7 +22,7 @@ bool probe_kernel_read_allowed(const void *unsafe_src, size_t size) canonical_address(vaddr, boot_cpu_data.x86_virt_bits) == vaddr; } #else -bool probe_kernel_read_allowed(const void *unsafe_src, size_t size) +bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) { return (unsigned long)unsafe_src >= TASK_SIZE_MAX; } diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c index 9c97d814125e..4f15280732ed 100644 --- a/arch/x86/pci/pcbios.c +++ b/arch/x86/pci/pcbios.c @@ -302,7 +302,7 @@ static const struct pci_raw_ops *__init pci_find_bios(void) check <= (union bios32 *) __va(0xffff0); ++check) { long sig; - if (probe_kernel_address(&check->fields.signature, sig)) + if (get_kernel_nofault(sig, &check->fields.signature)) continue; if (check->fields.signature != BIOS32_SIGNATURE) diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c index b8f7f193f383..30bd5714a3d4 100644 --- a/arch/x86/platform/intel-mid/sfi.c +++ b/arch/x86/platform/intel-mid/sfi.c @@ -287,8 +287,8 @@ void intel_scu_devices_create(void) adapter = i2c_get_adapter(i2c_bus[i]); if (adapter) { - client = i2c_new_device(adapter, i2c_devs[i]); - if (!client) + client = i2c_new_client_device(adapter, i2c_devs[i]); + if (IS_ERR(client)) pr_err("can't create i2c device %s\n", i2c_devs[i]->type); } else diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 7c65102debaf..db1378c6ff26 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -193,6 +193,8 @@ static void fix_processor_context(void) */ static void notrace __restore_processor_state(struct saved_context *ctxt) { + struct cpuinfo_x86 *c; + if (ctxt->misc_enable_saved) wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); /* @@ -263,6 +265,10 @@ static void notrace __restore_processor_state(struct saved_context *ctxt) mtrr_bp_restore(); perf_restore_debug_store(); msr_restore_context(ctxt); + + c = &cpu_data(smp_processor_id()); + if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL)) + init_ia32_feat_ctl(c); } /* Needed by apm.c */ diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index b04e6e72a592..088bd764e0b7 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -34,6 +34,7 @@ KCOV_INSTRUMENT := n PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING +PURGATORY_CFLAGS += $(call cc-option,-fno-stack-protector) # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. 
That # in turn leaves some undefined symbols like __fentry__ in purgatory and not diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 33b309d65955..acc49fa6a097 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -386,7 +386,7 @@ static void set_aliased_prot(void *v, pgprot_t prot) preempt_disable(); - probe_kernel_read(&dummy, v, 1); + copy_from_kernel_nofault(&dummy, v, 1); if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) BUG(); diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 23632a33ed39..4707e90b8ee5 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -24,6 +24,18 @@ void blk_flush_integrity(void) flush_workqueue(kintegrityd_wq); } +void __bio_integrity_free(struct bio_set *bs, struct bio_integrity_payload *bip) +{ + if (bs && mempool_initialized(&bs->bio_integrity_pool)) { + if (bip->bip_vec) + bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, + bip->bip_slab); + mempool_free(bip, &bs->bio_integrity_pool); + } else { + kfree(bip); + } +} + /** * bio_integrity_alloc - Allocate integrity payload and attach it to bio * @bio: bio to attach integrity metadata to @@ -78,7 +90,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, return bip; err: - mempool_free(bip, &bs->bio_integrity_pool); + __bio_integrity_free(bs, bip); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(bio_integrity_alloc); @@ -99,14 +111,7 @@ void bio_integrity_free(struct bio *bio) kfree(page_address(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset); - if (bs && mempool_initialized(&bs->bio_integrity_pool)) { - bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, bip->bip_slab); - - mempool_free(bip, &bs->bio_integrity_pool); - } else { - kfree(bip); - } - + __bio_integrity_free(bs, bip); bio->bi_integrity = NULL; bio->bi_opf &= ~REQ_INTEGRITY; } diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 44f3d0967cb4..ae722f8b13fb 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -376,7 +376,7 @@ static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags, void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, void *priv) { - return __blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS); + __blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS); } /** diff --git a/block/blk-mq.c b/block/blk-mq.c index 4f57d27bfa73..a9aa6d1e44cf 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3479,7 +3479,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids) nr_hw_queues = nr_cpu_ids; - if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues) + if (nr_hw_queues < 1) + return; + if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues) return; list_for_each_entry(q, &set->tag_list, tag_set_list) diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c index 6fdfcb40c537..d333786b5c7e 100644 --- a/block/partitions/ldm.c +++ b/block/partitions/ldm.c @@ -910,7 +910,7 @@ static bool ldm_parse_dsk4 (const u8 *buffer, int buflen, struct vblk *vb) return false; disk = &vb->vblk.disk; - uuid_copy(&disk->disk_id, (uuid_t *)(buffer + 0x18 + r_name)); + import_uuid(&disk->disk_id, buffer + 0x18 + r_name); return true; } diff --git a/block/partitions/ldm.h b/block/partitions/ldm.h index 841580af7f9b..d8d6beaa72c4 100644 --- a/block/partitions/ldm.h +++ b/block/partitions/ldm.h @@ -93,7 +93,7 @@ struct frag { /* VBLK Fragment handling */ u8 num; /* Total number of records */ u8 rec; /* This is record number n */ u8 map; /* Which 
portions are in use */ - u8 data[0]; + u8 data[]; }; /* In memory LDM database structures. */ diff --git a/crypto/algboss.c b/crypto/algboss.c index 535f1f87e6c1..5ebccbd6b74e 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -178,8 +178,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) if (IS_ERR(thread)) goto err_put_larval; - wait_for_completion_interruptible(&larval->completion); - return NOTIFY_STOP; err_put_larval: diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index e2c8ab408bed..4c3bdffe0c3a 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -74,14 +74,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, return PTR_ERR(areq); /* convert iovecs of output buffers into RX SGL */ - err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len); + err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len); if (err) goto free; - /* Process only as much RX buffers for which we have TX data */ - if (len > ctx->used) - len = ctx->used; - /* * If more buffers are to be expected to be processed, process only * full block size buffers. diff --git a/crypto/drbg.c b/crypto/drbg.c index 37526eb8c5d5..8d80d93cab97 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1631,10 +1631,12 @@ static int drbg_uninstantiate(struct drbg_state *drbg) if (drbg->random_ready.func) { del_random_ready_callback(&drbg->random_ready); cancel_work_sync(&drbg->seed_work); - crypto_free_rng(drbg->jent); - drbg->jent = NULL; } + if (!IS_ERR_OR_NULL(drbg->jent)) + crypto_free_rng(drbg->jent); + drbg->jent = NULL; + if (drbg->d_ops) drbg->d_ops->crypto_fini(drbg); drbg_dealloc_state(drbg); diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c index ece8c1a921cc..88c8af455ea3 100644 --- a/drivers/acpi/acpi_configfs.c +++ b/drivers/acpi/acpi_configfs.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "acpica/accommon.h" #include "acpica/actables.h" @@ -28,7 +29,10 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg, { const struct acpi_table_header *header = data; struct acpi_table *table; - int ret; + int ret = security_locked_down(LOCKDOWN_ACPI_TABLES); + + if (ret) + return ret; table = container_of(cfg, struct acpi_table, cfg); diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 3a89909b50a6..76c668c05fa0 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -938,13 +938,13 @@ static void __exit interrupt_stats_exit(void) } static ssize_t -acpi_show_profile(struct device *dev, struct device_attribute *attr, +acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile); } -static const struct device_attribute pm_profile_attr = +static const struct kobj_attribute pm_profile_attr = __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL); static ssize_t hotplug_enabled_show(struct kobject *kobj, diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c index 57d3b2e2d007..0b2c20fddb7c 100644 --- a/drivers/amba/tegra-ahb.c +++ b/drivers/amba/tegra-ahb.c @@ -120,7 +120,7 @@ static const u32 tegra_ahb_gizmo[] = { struct tegra_ahb { void __iomem *regs; struct device *dev; - u32 ctx[0]; + u32 ctx[]; }; static inline u32 gizmo_readl(struct tegra_ahb *ahb, u32 offset) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index e47c8a4c83db..f50c5f182bb5 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -4686,8 +4686,15 @@ static struct binder_thread 
*binder_get_thread(struct binder_proc *proc) static void binder_free_proc(struct binder_proc *proc) { + struct binder_device *device; + BUG_ON(!list_empty(&proc->todo)); BUG_ON(!list_empty(&proc->delivered_death)); + device = container_of(proc->context, struct binder_device, context); + if (refcount_dec_and_test(&device->ref)) { + kfree(proc->context->name); + kfree(device); + } binder_alloc_deferred_release(&proc->alloc); put_task_struct(proc->tsk); binder_stats_deleted(BINDER_STAT_PROC); @@ -5406,7 +5413,6 @@ static int binder_node_release(struct binder_node *node, int refs) static void binder_deferred_release(struct binder_proc *proc) { struct binder_context *context = proc->context; - struct binder_device *device; struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, active_transactions; @@ -5423,12 +5429,6 @@ static void binder_deferred_release(struct binder_proc *proc) context->binder_context_mgr_node = NULL; } mutex_unlock(&context->context_mgr_node_lock); - device = container_of(proc->context, struct binder_device, context); - if (refcount_dec_and_test(&device->ref)) { - kfree(context->name); - kfree(device); - } - proc->context = NULL; binder_inner_proc_lock(proc); /* * Make sure proc stays alive after we diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 69361ec43db5..b1cd4d97bc2a 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -42,7 +42,6 @@ #include #include #include -#include #include #include #include @@ -5778,7 +5777,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) /* perform each probe asynchronously */ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; - async_schedule(async_port_probe, ap); + ap->cookie = async_schedule(async_port_probe, ap); } return 0; @@ -5920,11 +5919,11 @@ void ata_host_detach(struct ata_host *host) { int i; - /* Ensure ata_port probe has completed */ - async_synchronize_full(); - - for (i = 0; i < host->n_ports; i++) + for (i = 0; i < host->n_ports; i++) { + /* Ensure ata_port probe has completed */ + async_synchronize_cookie(host->ports[i]->cookie + 1); ata_port_detach(host->ports[i]); + } /* the host is dead now, dissociate ACPI */ ata_acpi_dissociate(host); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 435781a16875..46336084b1a9 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3684,12 +3684,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) { struct scsi_cmnd *scmd = qc->scsicmd; const u8 *cdb = scmd->cmnd; - const u8 *p; u8 pg, spg; unsigned six_byte, pg_len, hdr_len, bd_len; int len; u16 fp = (u16)-1; u8 bp = 0xff; + u8 buffer[64]; + const u8 *p = buffer; VPRINTK("ENTER\n"); @@ -3723,12 +3724,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) goto invalid_param_len; - p = page_address(sg_page(scsi_sglist(scmd))); - /* Move past header and block descriptors. 
*/ if (len < hdr_len) goto invalid_param_len; + if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd), + buffer, sizeof(buffer))) + goto invalid_param_len; + if (six_byte) bd_len = p[3]; else diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 980aacdbcf3b..141ac600b64c 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -907,7 +907,7 @@ static int sata_rcar_probe(struct platform_device *pdev) pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) - goto err_pm_disable; + goto err_pm_put; host = ata_host_alloc(dev, 1); if (!host) { @@ -937,7 +937,6 @@ static int sata_rcar_probe(struct platform_device *pdev) err_pm_put: pm_runtime_put(dev); -err_pm_disable: pm_runtime_disable(dev); return ret; } @@ -991,8 +990,10 @@ static int sata_rcar_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } if (priv->type == RCAR_GEN3_SATA) { sata_rcar_init_module(priv); @@ -1017,8 +1018,10 @@ static int sata_rcar_restore(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } sata_rcar_setup_port(host); diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 977d27bd1a22..a97f33d0c59f 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -265,14 +265,14 @@ static struct notifier_block pm_trace_nb = { .notifier_call = pm_trace_notify, }; -static int early_resume_init(void) +static int __init early_resume_init(void) { hash_value_early_read = read_magic_time(); register_pm_notifier(&pm_trace_nb); return 0; } -static int late_resume_init(void) +static int __init late_resume_init(void) { unsigned int val = hash_value_early_read; unsigned int user, file, dev; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index c472f624382d..06a796821e8b 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -17,6 +17,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include "trace.h" @@ -249,22 +250,20 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift) static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift) { - __be16 *b = buf; - - b[0] = cpu_to_be16(val << shift); + put_unaligned_be16(val << shift, buf); } static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift) { - __le16 *b = buf; - - b[0] = cpu_to_le16(val << shift); + put_unaligned_le16(val << shift, buf); } static void regmap_format_16_native(void *buf, unsigned int val, unsigned int shift) { - *(u16 *)buf = val << shift; + u16 v = val << shift; + + memcpy(buf, &v, sizeof(v)); } static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) @@ -280,43 +279,39 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift) { - __be32 *b = buf; - - b[0] = cpu_to_be32(val << shift); + put_unaligned_be32(val << shift, buf); } static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift) { - __le32 *b = buf; - - b[0] = cpu_to_le32(val << shift); + put_unaligned_le32(val << shift, buf); } static void regmap_format_32_native(void *buf, unsigned int val, unsigned int shift) { - *(u32 *)buf = val << shift; + u32 v = val << shift; + + memcpy(buf, &v, sizeof(v)); } #ifdef CONFIG_64BIT static void regmap_format_64_be(void *buf, unsigned int val, unsigned 
int shift) { - __be64 *b = buf; - - b[0] = cpu_to_be64((u64)val << shift); + put_unaligned_be64((u64) val << shift, buf); } static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift) { - __le64 *b = buf; - - b[0] = cpu_to_le64((u64)val << shift); + put_unaligned_le64((u64) val << shift, buf); } static void regmap_format_64_native(void *buf, unsigned int val, unsigned int shift) { - *(u64 *)buf = (u64)val << shift; + u64 v = (u64) val << shift; + + memcpy(buf, &v, sizeof(v)); } #endif @@ -333,35 +328,34 @@ static unsigned int regmap_parse_8(const void *buf) static unsigned int regmap_parse_16_be(const void *buf) { - const __be16 *b = buf; - - return be16_to_cpu(b[0]); + return get_unaligned_be16(buf); } static unsigned int regmap_parse_16_le(const void *buf) { - const __le16 *b = buf; - - return le16_to_cpu(b[0]); + return get_unaligned_le16(buf); } static void regmap_parse_16_be_inplace(void *buf) { - __be16 *b = buf; + u16 v = get_unaligned_be16(buf); - b[0] = be16_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static void regmap_parse_16_le_inplace(void *buf) { - __le16 *b = buf; + u16 v = get_unaligned_le16(buf); - b[0] = le16_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static unsigned int regmap_parse_16_native(const void *buf) { - return *(u16 *)buf; + u16 v; + + memcpy(&v, buf, sizeof(v)); + return v; } static unsigned int regmap_parse_24(const void *buf) @@ -376,69 +370,67 @@ static unsigned int regmap_parse_24(const void *buf) static unsigned int regmap_parse_32_be(const void *buf) { - const __be32 *b = buf; - - return be32_to_cpu(b[0]); + return get_unaligned_be32(buf); } static unsigned int regmap_parse_32_le(const void *buf) { - const __le32 *b = buf; - - return le32_to_cpu(b[0]); + return get_unaligned_le32(buf); } static void regmap_parse_32_be_inplace(void *buf) { - __be32 *b = buf; + u32 v = get_unaligned_be32(buf); - b[0] = be32_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static void regmap_parse_32_le_inplace(void *buf) { - __le32 *b = buf; + u32 v = get_unaligned_le32(buf); - b[0] = le32_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static unsigned int regmap_parse_32_native(const void *buf) { - return *(u32 *)buf; + u32 v; + + memcpy(&v, buf, sizeof(v)); + return v; } #ifdef CONFIG_64BIT static unsigned int regmap_parse_64_be(const void *buf) { - const __be64 *b = buf; - - return be64_to_cpu(b[0]); + return get_unaligned_be64(buf); } static unsigned int regmap_parse_64_le(const void *buf) { - const __le64 *b = buf; - - return le64_to_cpu(b[0]); + return get_unaligned_le64(buf); } static void regmap_parse_64_be_inplace(void *buf) { - __be64 *b = buf; + u64 v = get_unaligned_be64(buf); - b[0] = be64_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static void regmap_parse_64_le_inplace(void *buf) { - __le64 *b = buf; + u64 v = get_unaligned_le64(buf); - b[0] = le64_to_cpu(b[0]); + memcpy(buf, &v, sizeof(v)); } static unsigned int regmap_parse_64_native(const void *buf) { - return *(u64 *)buf; + u64 v; + + memcpy(&v, buf, sizeof(v)); + return v; } #endif @@ -1357,6 +1349,7 @@ void regmap_exit(struct regmap *map) if (map->hwlock) hwspin_lock_free(map->hwlock); kfree_const(map->name); + kfree(map->patch); kfree(map); } EXPORT_SYMBOL_GPL(regmap_exit); @@ -2944,8 +2937,9 @@ EXPORT_SYMBOL_GPL(regmap_update_bits_base); * @reg: Register to read from * @bits: Bits to test * - * Returns -1 if the underlying regmap_read() fails, 0 if at least one of the - * tested bits is not set and 1 if all tested bits are set. 
+ * Returns 0 if at least one of the tested bits is not set, 1 if all tested + * bits are set and a negative error number if the underlying regmap_read() + * fails. */ int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits) { diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 14345a87c7cc..33d0831c99b6 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -620,7 +620,7 @@ struct fifo_buffer { unsigned int head_index; unsigned int size; int total; /* sum of all values */ - int values[0]; + int values[]; }; extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size); diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h index e6fc5ad72501..dea59c92ecc1 100644 --- a/drivers/block/drbd/drbd_protocol.h +++ b/drivers/block/drbd/drbd_protocol.h @@ -271,7 +271,7 @@ struct p_rs_param { u32 resync_rate; /* Since protocol version 88 and higher. */ - char verify_alg[0]; + char verify_alg[]; } __packed; struct p_rs_param_89 { @@ -305,7 +305,7 @@ struct p_protocol { u32 two_primaries; /* Since protocol version 87 and higher. */ - char integrity_alg[0]; + char integrity_alg[]; } __packed; @@ -360,7 +360,7 @@ struct p_sizes { u16 dds_flags; /* use enum dds_flags here. */ /* optional queue_limits if (agreed_features & DRBD_FF_WSAME) */ - struct o_qlim qlim[0]; + struct o_qlim qlim[]; } __packed; struct p_state { @@ -409,7 +409,7 @@ struct p_compressed_bm { */ u8 encoding; - u8 code[0]; + u8 code[]; } __packed; struct p_delay_probe93 { diff --git a/drivers/block/loop.c b/drivers/block/loop.c index c33bbbfd1bd9..475e1a738560 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1368,14 +1368,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) lo->lo_sizelimit != info->lo_sizelimit) { size_changed = true; sync_blockdev(lo->lo_device); - kill_bdev(lo->lo_device); + invalidate_bdev(lo->lo_device); } /* I/O need to be drained during transfer transition */ blk_mq_freeze_queue(lo->lo_queue); if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) { - /* If any pages were dirtied after kill_bdev(), try again */ + /* If any pages were dirtied after invalidate_bdev(), try again */ err = -EAGAIN; pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", __func__, lo->lo_number, lo->lo_file_name, @@ -1615,11 +1615,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) return 0; sync_blockdev(lo->lo_device); - kill_bdev(lo->lo_device); + invalidate_bdev(lo->lo_device); blk_mq_freeze_queue(lo->lo_queue); - /* kill_bdev should have truncated all the pages */ + /* invalidate_bdev should have truncated all the pages */ if (lo->lo_device->bd_inode->i_mapping->nrpages) { err = -EAGAIN; pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 7420648a1de6..4f61e9209461 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1451,8 +1451,10 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req) static void rbd_osd_format_read(struct ceph_osd_request *osd_req) { struct rbd_obj_request *obj_request = osd_req->r_priv; + struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev; + struct ceph_options *opt = rbd_dev->rbd_client->client->options; - osd_req->r_flags = CEPH_OSD_FLAG_READ; + osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica; osd_req->r_snapid = obj_request->img_request->snap_id; } diff --git a/drivers/bus/ti-sysc.c 
b/drivers/bus/ti-sysc.c index 3affd180baac..bb54fb514e40 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -221,6 +221,35 @@ static u32 sysc_read_sysstatus(struct sysc *ddata) return sysc_read(ddata, offset); } +/* Poll on reset status */ +static int sysc_wait_softreset(struct sysc *ddata) +{ + u32 sysc_mask, syss_done, rstval; + int syss_offset, error = 0; + + syss_offset = ddata->offsets[SYSC_SYSSTATUS]; + sysc_mask = BIT(ddata->cap->regbits->srst_shift); + + if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED) + syss_done = 0; + else + syss_done = ddata->cfg.syss_mask; + + if (syss_offset >= 0) { + error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval, + (rstval & ddata->cfg.syss_mask) == + syss_done, + 100, MAX_MODULE_SOFTRESET_WAIT); + + } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) { + error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval, + !(rstval & sysc_mask), + 100, MAX_MODULE_SOFTRESET_WAIT); + } + + return error; +} + static int sysc_add_named_clock_from_child(struct sysc *ddata, const char *name, const char *optfck_name) @@ -925,18 +954,47 @@ static int sysc_enable_module(struct device *dev) struct sysc *ddata; const struct sysc_regbits *regbits; u32 reg, idlemodes, best_mode; + int error; ddata = dev_get_drvdata(dev); + + /* + * Some modules like DSS reset automatically on idle. Enable optional + * reset clocks and wait for OCP softreset to complete. + */ + if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) { + error = sysc_enable_opt_clocks(ddata); + if (error) { + dev_err(ddata->dev, + "Optional clocks failed for enable: %i\n", + error); + return error; + } + } + error = sysc_wait_softreset(ddata); + if (error) + dev_warn(ddata->dev, "OCP softreset timed out\n"); + if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) + sysc_disable_opt_clocks(ddata); + + /* + * Some subsystem private interconnects, like DSS top level module, + * need only the automatic OCP softreset handling with no sysconfig + * register bits to configure. + */ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV) return 0; regbits = ddata->cap->regbits; reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); - /* Set CLOCKACTIVITY, we only use it for ick */ + /* + * Set CLOCKACTIVITY, we only use it for ick. And we only configure it + * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware + * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag. 
+ */ if (regbits->clkact_shift >= 0 && - (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT || - ddata->cfg.sysc_val & BIT(regbits->clkact_shift))) + (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT)) reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift; /* Set SIDLE mode */ @@ -991,6 +1049,9 @@ static int sysc_enable_module(struct device *dev) sysc_write_sysconfig(ddata, reg); } + /* Flush posted write */ + sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); + if (ddata->module_enable_quirk) ddata->module_enable_quirk(ddata); @@ -1071,6 +1132,9 @@ static int sysc_disable_module(struct device *dev) reg |= 1 << regbits->autoidle_shift; sysc_write_sysconfig(ddata, reg); + /* Flush posted write */ + sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); + return 0; } @@ -1488,7 +1552,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset, bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false; const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1); int manager_count; - bool framedonetv_irq; + bool framedonetv_irq = true; u32 val, irq_mask = 0; switch (sysc_soc->soc) { @@ -1505,6 +1569,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset, break; case SOC_AM4: manager_count = 1; + framedonetv_irq = false; break; case SOC_UNKNOWN: default: @@ -1822,11 +1887,10 @@ static int sysc_legacy_init(struct sysc *ddata) */ static int sysc_reset(struct sysc *ddata) { - int sysc_offset, syss_offset, sysc_val, rstval, error = 0; - u32 sysc_mask, syss_done; + int sysc_offset, sysc_val, error; + u32 sysc_mask; sysc_offset = ddata->offsets[SYSC_SYSCONFIG]; - syss_offset = ddata->offsets[SYSC_SYSSTATUS]; if (ddata->legacy_mode || ddata->cap->regbits->srst_shift < 0 || @@ -1835,11 +1899,6 @@ static int sysc_reset(struct sysc *ddata) sysc_mask = BIT(ddata->cap->regbits->srst_shift); - if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED) - syss_done = 0; - else - syss_done = ddata->cfg.syss_mask; - if (ddata->pre_reset_quirk) ddata->pre_reset_quirk(ddata); @@ -1856,18 +1915,9 @@ static int sysc_reset(struct sysc *ddata) if (ddata->post_reset_quirk) ddata->post_reset_quirk(ddata); - /* Poll on reset status */ - if (syss_offset >= 0) { - error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval, - (rstval & ddata->cfg.syss_mask) == - syss_done, - 100, MAX_MODULE_SOFTRESET_WAIT); - - } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) { - error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval, - !(rstval & sysc_mask), - 100, MAX_MODULE_SOFTRESET_WAIT); - } + error = sysc_wait_softreset(ddata); + if (error) + dev_warn(ddata->dev, "OCP softreset timed out\n"); if (ddata->reset_done_quirk) ddata->reset_done_quirk(ddata); diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c index e2330e757f1f..001617033d6a 100644 --- a/drivers/char/hw_random/ks-sa-rng.c +++ b/drivers/char/hw_random/ks-sa-rng.c @@ -244,6 +244,7 @@ static int ks_sa_rng_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "Failed to enable SA power-domain\n"); + pm_runtime_put_noidle(dev); pm_runtime_disable(dev); return ret; } diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 31cae88a730b..934c92dcb9ab 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -171,7 +171,7 @@ static ssize_t read_mem(struct file *file, char __user *buf, if (!ptr) goto failed; - probe = probe_kernel_read(bounce, ptr, sz); + probe = copy_from_kernel_nofault(bounce, ptr, sz); unxlate_dev_mem_ptr(p, ptr); if (probe) goto failed; diff --git 
a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c index 6282ee2f361c..a8901f90a61a 100644 --- a/drivers/clk/sifive/fu540-prci.c +++ b/drivers/clk/sifive/fu540-prci.c @@ -586,7 +586,10 @@ static int sifive_fu540_prci_probe(struct platform_device *pdev) struct __prci_data *pd; int r; - pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); + pd = devm_kzalloc(dev, + struct_size(pd, hw_clks.hws, + ARRAY_SIZE(__prci_init_clocks)), + GFP_KERNEL); if (!pd) return -ENOMEM; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 8e23a698ce04..e771e8b4f99f 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2677,6 +2677,8 @@ static struct acpi_platform_list plat_info[] __initdata = { { } /* End */ }; +#define BITMASK_OOB (BIT(8) | BIT(18)) + static bool __init intel_pstate_platform_pwr_mgmt_exists(void) { const struct x86_cpu_id *id; @@ -2686,8 +2688,9 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void) id = x86_match_cpu(intel_pstate_cpu_oob_ids); if (id) { rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); - if (misc_pwr & (1 << 8)) { - pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n"); + if (misc_pwr & BITMASK_OOB) { + pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n"); + pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n"); return true; } } diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index c149d9e20dfd..87197319ab06 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -186,9 +186,10 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev) * be frozen safely. */ index = find_deepest_state(drv, dev, U64_MAX, 0, true); - if (index > 0) + if (index > 0) { enter_s2idle_proper(drv, dev, index); - + local_irq_enable(); + } return index; } #endif /* CONFIG_SUSPEND */ diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index a62f228be6da..bc35aa0ec07a 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -147,7 +147,7 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API select HW_RANDOM help Selecting this will register the SEC4 hardware rng to - the hw_random API for suppying the kernel entropy pool. + the hw_random API for supplying the kernel entropy pool. endif # CRYPTO_DEV_FSL_CAAM_JR diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 4fcdd262e581..f3d20b7645e0 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -54,7 +54,7 @@ static void build_instantiation_desc(u32 *desc, int handle, int do_sk) /* * load 1 to clear written reg: - * resets the done interrrupt and returns the RNG to idle. + * resets the done interrupt and returns the RNG to idle. */ append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW); @@ -156,7 +156,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, DESC_DER_DECO_STAT_SHIFT; /* - * If an error occured in the descriptor, then + * If an error occurred in the descriptor, then * the DECO status field will be set to 0x0D */ if (deco_state == DECO_STAT_HOST_ERR) @@ -264,7 +264,7 @@ static void devm_deinstantiate_rng(void *data) * - -ENODEV if DECO0 couldn't be acquired * - -EAGAIN if an error occurred when executing the descriptor * f.i. there was a RNG hardware error due to not "good enough" - * entropy being aquired. + * entropy being acquired. 
*/ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, int gen_sk) @@ -733,8 +733,8 @@ static int caam_probe(struct platform_device *pdev) handle_imx6_err005766(&ctrl->mcr); /* - * Read the Compile Time paramters and SCFGR to determine - * if Virtualization is enabled for this platform + * Read the Compile Time parameters and SCFGR to determine + * if virtualization is enabled for this platform */ scfgr = rd_reg32(&ctrl->scfgr); @@ -863,9 +863,9 @@ static int caam_probe(struct platform_device *pdev) } /* * if instantiate_rng(...) fails, the loop will rerun - * and the kick_trng(...) function will modfiy the + * and the kick_trng(...) function will modify the * upper and lower limits of the entropy sampling - * interval, leading to a sucessful initialization of + * interval, leading to a successful initialization of * the RNG. */ ret = instantiate_rng(dev, inst_handles, @@ -882,8 +882,8 @@ static int caam_probe(struct platform_device *pdev) return ret; } /* - * Set handles init'ed by this module as the complement of the - * already initialized ones + * Set handles initialized by this module as the complement of + * the already initialized ones */ ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK; diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h index e796d3cb9be8..e13470901586 100644 --- a/drivers/crypto/caam/desc.h +++ b/drivers/crypto/caam/desc.h @@ -18,7 +18,7 @@ */ #define SEC4_SG_LEN_EXT 0x80000000 /* Entry points to table */ -#define SEC4_SG_LEN_FIN 0x40000000 /* Last ent in table */ +#define SEC4_SG_LEN_FIN 0x40000000 /* Last entry in table */ #define SEC4_SG_BPID_MASK 0x000000ff #define SEC4_SG_BPID_SHIFT 16 #define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */ @@ -113,7 +113,7 @@ */ #define HDR_REVERSE 0x00000800 -/* Propogate DNR property to SharedDesc */ +/* Propagate DNR property to SharedDesc */ #define HDR_PROP_DNR 0x00000800 /* JobDesc/SharedDesc share property */ diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h index 68c1fd5dee5d..8ccc22075043 100644 --- a/drivers/crypto/caam/pdb.h +++ b/drivers/crypto/caam/pdb.h @@ -453,7 +453,7 @@ struct srtp_decap_pdb { #define DSA_PDB_N_MASK 0x7f struct dsa_sign_pdb { - u32 sgf_ln; /* Use DSA_PDB_ defintions per above */ + u32 sgf_ln; /* Use DSA_PDB_ definitions per above */ u8 *q; u8 *r; u8 *g; /* or Gx,y */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index a2426334be61..476113e12489 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -376,6 +376,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) struct sev_device *sev = psp_master->sev_data; struct sev_user_data_pek_csr input; struct sev_data_pek_csr *data; + void __user *input_address; void *blob = NULL; int ret; @@ -394,6 +395,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) goto cmd; /* allocate a physically contiguous buffer to store the CSR blob */ + input_address = (void __user *)input.address; if (input.length > SEV_FW_BLOB_MAX_SIZE) { ret = -EFAULT; goto e_free; @@ -426,7 +428,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) } if (blob) { - if (copy_to_user((void __user *)input.address, blob, input.length)) + if (copy_to_user(input_address, blob, input.length)) ret = -EFAULT; } @@ -437,7 +439,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) return ret; } -void *psp_copy_user_blob(u64 __user uaddr, u32 len) +void 
*psp_copy_user_blob(u64 uaddr, u32 len) { if (!uaddr || !len) return ERR_PTR(-EINVAL); @@ -446,7 +448,7 @@ void *psp_copy_user_blob(u64 __user uaddr, u32 len) if (len > SEV_FW_BLOB_MAX_SIZE) return ERR_PTR(-EINVAL); - return memdup_user((void __user *)(uintptr_t)uaddr, len); + return memdup_user((void __user *)uaddr, len); } EXPORT_SYMBOL_GPL(psp_copy_user_blob); @@ -621,6 +623,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) { struct sev_user_data_get_id2 input; struct sev_data_get_id *data; + void __user *input_address; void *id_blob = NULL; int ret; @@ -631,6 +634,8 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) return -EFAULT; + input_address = (void __user *)input.address; + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; @@ -660,8 +665,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) } if (id_blob) { - if (copy_to_user((void __user *)input.address, - id_blob, data->len)) { + if (copy_to_user(input_address, id_blob, data->len)) { ret = -EFAULT; goto e_free; } @@ -720,6 +724,8 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) struct sev_user_data_pdh_cert_export input; void *pdh_blob = NULL, *cert_blob = NULL; struct sev_data_pdh_cert_export *data; + void __user *input_cert_chain_address; + void __user *input_pdh_cert_address; int ret; /* If platform is not in INIT state then transition it to INIT. */ @@ -745,6 +751,9 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) !input.cert_chain_address) goto cmd; + input_pdh_cert_address = (void __user *)input.pdh_cert_address; + input_cert_chain_address = (void __user *)input.cert_chain_address; + /* Allocate a physically contiguous buffer to store the PDH blob. 
*/ if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) { ret = -EFAULT; @@ -788,7 +797,7 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) } if (pdh_blob) { - if (copy_to_user((void __user *)input.pdh_cert_address, + if (copy_to_user(input_pdh_cert_address, pdh_blob, input.pdh_cert_len)) { ret = -EFAULT; goto e_free_cert; @@ -796,7 +805,7 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) } if (cert_blob) { - if (copy_to_user((void __user *)input.cert_chain_address, + if (copy_to_user(input_cert_chain_address, cert_blob, input.cert_chain_len)) ret = -EFAULT; } diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index b3fdbdc25acb..31e427e273f8 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -223,7 +223,7 @@ struct chcr_authenc_ctx { struct __aead_ctx { struct chcr_gcm_ctx gcm[0]; - struct chcr_authenc_ctx authenc[0]; + struct chcr_authenc_ctx authenc[]; }; struct chcr_aead_ctx { @@ -235,7 +235,7 @@ struct chcr_aead_ctx { u8 nonce[4]; u16 hmac_ctrl; u16 mayverify; - struct __aead_ctx ctx[0]; + struct __aead_ctx ctx[]; }; struct hmac_ctx { @@ -247,7 +247,7 @@ struct hmac_ctx { struct __crypto_ctx { struct hmac_ctx hmacctx[0]; struct ablk_ctx ablkctx[0]; - struct chcr_aead_ctx aeadctx[0]; + struct chcr_aead_ctx aeadctx[]; }; struct chcr_context { @@ -257,7 +257,7 @@ struct chcr_context { unsigned int ntxq; unsigned int nrxq; struct completion cbc_aes_aio_done; - struct __crypto_ctx crypto_ctx[0]; + struct __crypto_ctx crypto_ctx[]; }; struct chcr_hctx_per_wr { diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 0e8c7e324fb4..725a739800b0 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -66,7 +66,8 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, sgl_size = sizeof(struct acc_hw_sge) * sge_nr + sizeof(struct hisi_acc_hw_sgl); - block_size = PAGE_SIZE * (1 << (MAX_ORDER - 1)); + block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ? 
+ PAGE_SHIFT + MAX_ORDER - 1 : 31); sgl_num_per_block = block_size / sgl_size; block_num = count / sgl_num_per_block; remain_sgl = count % sgl_num_per_block; diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c index 60e744f680d3..1e0a1d70ebd3 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c @@ -118,6 +118,9 @@ static void otx_cpt_aead_callback(int status, void *arg1, void *arg2) struct otx_cpt_req_info *cpt_req; struct pci_dev *pdev; + if (!cpt_info) + goto complete; + cpt_req = cpt_info->req; if (!status) { /* @@ -129,10 +132,10 @@ static void otx_cpt_aead_callback(int status, void *arg1, void *arg2) !cpt_req->is_enc) status = validate_hmac_cipher_null(cpt_req); } - if (cpt_info) { - pdev = cpt_info->pdev; - do_request_cleanup(pdev, cpt_info); - } + pdev = cpt_info->pdev; + do_request_cleanup(pdev, cpt_info); + +complete: if (areq) areq->complete(areq, status); } diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c index c9aa15fb86a9..193b40e7aec0 100644 --- a/drivers/dio/dio.c +++ b/drivers/dio/dio.c @@ -135,7 +135,8 @@ int __init dio_find(int deviceid) else va = ioremap(pa, PAGE_SIZE); - if (probe_kernel_read(&i, (unsigned char *)va + DIO_IDOFF, 1)) { + if (copy_from_kernel_nofault(&i, + (unsigned char *)va + DIO_IDOFF, 1)) { if (scode >= DIOII_SCBASE) iounmap(va); continue; /* no board present at that select code */ @@ -208,7 +209,8 @@ static int __init dio_init(void) else va = ioremap(pa, PAGE_SIZE); - if (probe_kernel_read(&i, (unsigned char *)va + DIO_IDOFF, 1)) { + if (copy_from_kernel_nofault(&i, + (unsigned char *)va + DIO_IDOFF, 1)) { if (scode >= DIOII_SCBASE) iounmap(va); continue; /* no board present at that select code */ diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c index 8853d442430b..a8cfb59f6efe 100644 --- a/drivers/dma/milbeaut-hdmac.c +++ b/drivers/dma/milbeaut-hdmac.c @@ -77,7 +77,7 @@ struct milbeaut_hdmac_device { struct dma_device ddev; struct clk *clk; void __iomem *reg_base; - struct milbeaut_hdmac_chan channels[0]; + struct milbeaut_hdmac_chan channels[]; }; static struct milbeaut_hdmac_chan * diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c index ab3d2f395378..85a597228fb0 100644 --- a/drivers/dma/milbeaut-xdmac.c +++ b/drivers/dma/milbeaut-xdmac.c @@ -74,7 +74,7 @@ struct milbeaut_xdmac_chan { struct milbeaut_xdmac_device { struct dma_device ddev; void __iomem *reg_base; - struct milbeaut_xdmac_chan channels[0]; + struct milbeaut_xdmac_chan channels[]; }; static struct milbeaut_xdmac_chan * diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c index 4ab493d46375..347146a6e1d0 100644 --- a/drivers/dma/moxart-dma.c +++ b/drivers/dma/moxart-dma.c @@ -127,7 +127,7 @@ struct moxart_desc { unsigned int dma_cycles; struct virt_dma_desc vd; uint8_t es; - struct moxart_sg sg[0]; + struct moxart_sg sg[]; }; struct moxart_chan { diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index b9f0d9636620..55fc7400f717 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -225,7 +225,7 @@ struct tegra_dma { u32 global_pause_count; /* Last member of the structure */ - struct tegra_dma_channel channels[0]; + struct tegra_dma_channel channels[]; }; static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val) diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index c4a5c170c1f9..35d81bd857f1 100644 --- a/drivers/dma/ti/edma.c 
+++ b/drivers/dma/ti/edma.c @@ -211,7 +211,7 @@ struct edma_desc { u32 residue; u32 residue_stat; - struct edma_pset pset[0]; + struct edma_pset pset[]; }; struct edma_cc; diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 945b7c604f91..c91e2dc1bb72 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -170,7 +170,7 @@ struct udma_desc { void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */ unsigned int hwdesc_count; - struct udma_hwdesc hwdesc[0]; + struct udma_hwdesc hwdesc[]; }; enum udma_chan_state { diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 39382694fdfc..68e48bf54d78 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -88,7 +88,7 @@ struct timb_dma { struct dma_device dma; void __iomem *membase; struct tasklet_struct tasklet; - struct timb_dma_chan channels[0]; + struct timb_dma_chan channels[]; }; static struct device *chan2dev(struct dma_chan *chan) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index ef90070a9194..6262f6370c5d 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -269,6 +269,8 @@ static int get_scrub_rate(struct mem_ctl_info *mci) if (pvt->model == 0x60) amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); + else + amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); } else { amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); } diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index c7ea4f2d5ca6..fb6c651214f3 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -117,7 +117,7 @@ struct inbound_transaction_resource { struct descriptor_resource { struct client_resource resource; struct fw_descriptor descriptor; - u32 data[0]; + u32 data[]; }; struct iso_resource { diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 404a035f104d..439d918bbaaf 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -620,7 +620,7 @@ struct fw_request { u32 request_header[4]; int ack; u32 length; - u32 data[0]; + u32 data[]; }; static void free_response_callback(struct fw_packet *packet, diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index 4b0e4ee655a1..71d5f16f311c 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -191,7 +191,7 @@ struct fw_node { /* Upper layer specific data. 
*/ void *data; - struct fw_node *ports[0]; + struct fw_node *ports[]; }; static inline struct fw_node *fw_node_get(struct fw_node *node) diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index 6ca2f5ab6c57..5fd6a60b6741 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -52,7 +52,7 @@ struct pcl { struct packet { unsigned int length; - char data[0]; + char data[]; }; struct packet_buffer { diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 33269316f111..54fdc39cd0bc 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -111,7 +111,7 @@ struct descriptor_buffer { dma_addr_t buffer_bus; size_t buffer_size; size_t used; - struct descriptor buffer[0]; + struct descriptor buffer[]; }; struct context { diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c index b6180023eba7..8b8127fa8955 100644 --- a/drivers/firmware/dmi-sysfs.c +++ b/drivers/firmware/dmi-sysfs.c @@ -262,7 +262,7 @@ struct dmi_system_event_log { u8 header_format; u8 type_descriptors_supported_count; u8 per_log_type_descriptor_length; - u8 supported_log_type_descriptos[0]; + u8 supported_log_type_descriptos[]; } __packed; #define DMI_SYSFS_SEL_FIELD(_field) \ diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index e6fc022bc87e..3939699e62fe 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -278,3 +278,14 @@ config EFI_EARLYCON depends on SERIAL_EARLYCON && !ARM && !IA64 select FONT_SUPPORT select ARCH_USE_MEMREMAP_PROT + +config EFI_CUSTOM_SSDT_OVERLAYS + bool "Load custom ACPI SSDT overlay from an EFI variable" + depends on EFI_VARS && ACPI + default ACPI_TABLE_UPGRADE + help + Allow loading of an ACPI SSDT overlay from an EFI variable specified + by a kernel command line option. + + See Documentation/admin-guide/acpi/ssdt-overlays.rst for more + information. diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index c697e70ca7e7..71c445d20258 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c @@ -52,9 +52,11 @@ static phys_addr_t __init efi_to_phys(unsigned long addr) } static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR; +static __initdata unsigned long cpu_state_table = EFI_INVALID_TABLE_ADDR; static const efi_config_table_type_t arch_tables[] __initconst = { {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, &screen_info_table}, + {LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table}, {} }; @@ -62,7 +64,8 @@ static void __init init_screen_info(void) { struct screen_info *si; - if (screen_info_table != EFI_INVALID_TABLE_ADDR) { + if (IS_ENABLED(CONFIG_ARM) && + screen_info_table != EFI_INVALID_TABLE_ADDR) { si = early_memremap_ro(screen_info_table, sizeof(*si)); if (!si) { pr_err("Could not map screen_info config table\n"); @@ -116,7 +119,8 @@ static int __init uefi_init(u64 efi_system_table) goto out; } retval = efi_config_parse_tables(config_tables, systab->nr_tables, - arch_tables); + IS_ENABLED(CONFIG_ARM) ? 
arch_tables + : NULL); early_memunmap(config_tables, table_size); out: @@ -238,9 +242,37 @@ void __init efi_init(void) init_screen_info(); +#ifdef CONFIG_ARM /* ARM does not permit early mappings to persist across paging_init() */ - if (IS_ENABLED(CONFIG_ARM)) - efi_memmap_unmap(); + efi_memmap_unmap(); + + if (cpu_state_table != EFI_INVALID_TABLE_ADDR) { + struct efi_arm_entry_state *state; + bool dump_state = true; + + state = early_memremap_ro(cpu_state_table, + sizeof(struct efi_arm_entry_state)); + if (state == NULL) { + pr_warn("Unable to map CPU entry state table.\n"); + return; + } + + if ((state->sctlr_before_ebs & 1) == 0) + pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n"); + else if ((state->sctlr_after_ebs & 1) == 0) + pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n"); + else + dump_state = false; + + if (dump_state || efi_enabled(EFI_DBG)) { + pr_info("CPSR at EFI stub entry : 0x%08x\n", state->cpsr_before_ebs); + pr_info("SCTLR at EFI stub entry : 0x%08x\n", state->sctlr_before_ebs); + pr_info("CPSR after ExitBootServices() : 0x%08x\n", state->cpsr_after_ebs); + pr_info("SCTLR after ExitBootServices(): 0x%08x\n", state->sctlr_after_ebs); + } + early_memunmap(state, sizeof(struct efi_arm_entry_state)); + } +#endif } static bool efifb_overlaps_pci_range(const struct of_pci_range *range) diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 7f1657b6c30d..5114cae4ec97 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -189,7 +189,7 @@ static void generic_ops_unregister(void) efivars_unregister(&generic_efivars); } -#if IS_ENABLED(CONFIG_ACPI) +#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS #define EFIVAR_SSDT_NAME_MAX 16 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata; static int __init efivar_ssdt_setup(char *str) @@ -622,7 +622,8 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, rsv = (void *)(p + prsv % PAGE_SIZE); /* reserve the entry itself */ - memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size)); + memblock_reserve(prsv, + struct_size(rsv, entry, rsv->size)); for (i = 0; i < atomic_read(&rsv->count); i++) { memblock_reserve(rsv->entry[i].base, diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index e3d692696583..d5915272141f 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -181,7 +181,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num) rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL, "entry%d", entry_num); if (rc) { - kfree(entry); + kobject_put(&entry->kobj); return rc; } } diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 75daaf20374e..4cce372edaf4 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -6,7 +6,8 @@ # enabled, even if doing so doesn't break the build. 
# cflags-$(CONFIG_X86_32) := -march=i386 -cflags-$(CONFIG_X86_64) := -mcmodel=small +cflags-$(CONFIG_X86_64) := -mcmodel=small \ + $(call cc-option,-maccumulate-outgoing-args) cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \ -fPIC -fno-strict-aliasing -mno-red-zone \ -mno-mmx -mno-sse -fshort-wchar \ diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c index 40243f524556..d08e5d55838c 100644 --- a/drivers/firmware/efi/libstub/arm32-stub.c +++ b/drivers/firmware/efi/libstub/arm32-stub.c @@ -7,10 +7,49 @@ #include "efistub.h" +static efi_guid_t cpu_state_guid = LINUX_EFI_ARM_CPU_STATE_TABLE_GUID; + +struct efi_arm_entry_state *efi_entry_state; + +static void get_cpu_state(u32 *cpsr, u32 *sctlr) +{ + asm("mrs %0, cpsr" : "=r"(*cpsr)); + if ((*cpsr & MODE_MASK) == HYP_MODE) + asm("mrc p15, 4, %0, c1, c0, 0" : "=r"(*sctlr)); + else + asm("mrc p15, 0, %0, c1, c0, 0" : "=r"(*sctlr)); +} + efi_status_t check_platform_features(void) { + efi_status_t status; + u32 cpsr, sctlr; int block; + get_cpu_state(&cpsr, &sctlr); + + efi_info("Entering in %s mode with MMU %sabled\n", + ((cpsr & MODE_MASK) == HYP_MODE) ? "HYP" : "SVC", + (sctlr & 1) ? "en" : "dis"); + + status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, + sizeof(*efi_entry_state), + (void **)&efi_entry_state); + if (status != EFI_SUCCESS) { + efi_err("allocate_pool() failed\n"); + return status; + } + + efi_entry_state->cpsr_before_ebs = cpsr; + efi_entry_state->sctlr_before_ebs = sctlr; + + status = efi_bs_call(install_configuration_table, &cpu_state_guid, + efi_entry_state); + if (status != EFI_SUCCESS) { + efi_err("install_configuration_table() failed\n"); + goto free_state; + } + /* non-LPAE kernels can run anywhere */ if (!IS_ENABLED(CONFIG_ARM_LPAE)) return EFI_SUCCESS; @@ -19,9 +58,22 @@ efi_status_t check_platform_features(void) block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0); if (block < 5) { efi_err("This LPAE kernel is not supported by your CPU\n"); - return EFI_UNSUPPORTED; + status = EFI_UNSUPPORTED; + goto drop_table; } return EFI_SUCCESS; + +drop_table: + efi_bs_call(install_configuration_table, &cpu_state_guid, NULL); +free_state: + efi_bs_call(free_pool, efi_entry_state); + return status; +} + +void efi_handle_post_ebs_state(void) +{ + get_cpu_state(&efi_entry_state->cpsr_after_ebs, + &efi_entry_state->sctlr_after_ebs); } static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID; diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index 89f075275300..d40fd68c6bb2 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -32,6 +32,10 @@ bool __pure __efi_soft_reserve_enabled(void) return !efi_nosoftreserve; } +/** + * efi_char16_puts() - Write a UCS-2 encoded string to the console + * @str: UCS-2 encoded string + */ void efi_char16_puts(efi_char16_t *str) { efi_call_proto(efi_table_attr(efi_system_table, con_out), @@ -83,6 +87,10 @@ u32 utf8_to_utf32(const u8 **s8) return c32; } +/** + * efi_puts() - Write a UTF-8 encoded string to the console + * @str: UTF-8 encoded string + */ void efi_puts(const char *str) { efi_char16_t buf[128]; @@ -113,6 +121,16 @@ void efi_puts(const char *str) } } +/** + * efi_printk() - Print a kernel message + * @fmt: format string + * + * The first letter of the format string is used to determine the logging level + * of the message. If the level is less then the current EFI logging level, the + * message is suppressed. 
The message will be truncated to 255 bytes. + * + * Return: number of printed characters + */ int efi_printk(const char *fmt, ...) { char printf_buf[256]; @@ -154,13 +172,18 @@ int efi_printk(const char *fmt, ...) return printed; } -/* - * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi= +/** + * efi_parse_options() - Parse EFI command line options + * @cmdline: kernel command line + * + * Parse the ASCII string @cmdline for EFI options, denoted by the efi= * option, e.g. efi=nochunk. * * It should be noted that efi= is parsed in two very different * environments, first in the early boot environment of the EFI boot * stub, and subsequently during the kernel boot. + * + * Return: status code */ efi_status_t efi_parse_options(char const *cmdline) { @@ -286,13 +309,21 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len) return (char *)cmdline_addr; } -/* +/** + * efi_exit_boot_services() - Exit boot services + * @handle: handle of the exiting image + * @map: pointer to receive the memory map + * @priv: argument to be passed to @priv_func + * @priv_func: function to process the memory map before exiting boot services + * * Handle calling ExitBootServices according to the requirements set out by the * spec. Obtains the current memory map, and returns that info after calling * ExitBootServices. The client must specify a function to perform any * processing of the memory map data prior to ExitBootServices. A client * specific structure may be passed to the function via priv. The client * function may be called multiple times. + * + * Return: status code */ efi_status_t efi_exit_boot_services(void *handle, struct efi_boot_memmap *map, @@ -361,6 +392,11 @@ efi_status_t efi_exit_boot_services(void *handle, return status; } +/** + * get_efi_config_table() - retrieve UEFI configuration table + * @guid: GUID of the configuration table to be retrieved + * Return: pointer to the configuration table or NULL + */ void *get_efi_config_table(efi_guid_t guid) { unsigned long tables = efi_table_attr(efi_system_table, tables); @@ -408,17 +444,18 @@ static const struct { }; /** - * efi_load_initrd_dev_path - load the initrd from the Linux initrd device path + * efi_load_initrd_dev_path() - load the initrd from the Linux initrd device path * @load_addr: pointer to store the address where the initrd was loaded * @load_size: pointer to store the size of the loaded initrd * @max: upper limit for the initrd memory allocation - * @return: %EFI_SUCCESS if the initrd was loaded successfully, in which - * case @load_addr and @load_size are assigned accordingly - * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd - * device path - * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL - * %EFI_OUT_OF_RESOURCES if memory allocation failed - * %EFI_LOAD_ERROR in all other cases + * + * Return: + * * %EFI_SUCCESS if the initrd was loaded successfully, in which + * case @load_addr and @load_size are assigned accordingly + * * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd device path + * * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL + * * %EFI_OUT_OF_RESOURCES if memory allocation failed + * * %EFI_LOAD_ERROR in all other cases */ static efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr, @@ -481,6 +518,16 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image, load_addr, load_size); } +/** + * efi_load_initrd() - Load initial RAM disk + * @image: EFI loaded image protocol + * @load_addr: pointer to 
loaded initrd + * @load_size: size of loaded initrd + * @soft_limit: preferred size of allocated memory for loading the initrd + * @hard_limit: minimum size of allocated memory + * + * Return: status code + */ efi_status_t efi_load_initrd(efi_loaded_image_t *image, unsigned long *load_addr, unsigned long *load_size, @@ -505,6 +552,15 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image, return status; } +/** + * efi_wait_for_key() - Wait for key stroke + * @usec: number of microseconds to wait for key stroke + * @key: key entered + * + * Wait for up to @usec microseconds for a key stroke. + * + * Return: status code, EFI_SUCCESS if key received + */ efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key) { efi_event_t events[2], timer; diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c index e97370bdfdb0..3318ec3f8e5b 100644 --- a/drivers/firmware/efi/libstub/efi-stub.c +++ b/drivers/firmware/efi/libstub/efi-stub.c @@ -329,6 +329,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, if (status != EFI_SUCCESS) goto fail_free_initrd; + if (IS_ENABLED(CONFIG_ARM)) + efi_handle_post_ebs_state(); + efi_enter_kernel(image_addr, fdt_addr, fdt_totalsize((void *)fdt_addr)); /* not reached */ diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index bcd8c0a785f0..2c9d42264c29 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -157,8 +157,14 @@ typedef void (__efiapi *efi_event_notify_t)(efi_event_t, void *); #define EFI_EVT_NOTIFY_WAIT 0x00000100U #define EFI_EVT_NOTIFY_SIGNAL 0x00000200U -/* - * boottime->wait_for_event takes an array of events as input. +/** + * efi_set_event_at() - add event to events array + * + * @events: array of UEFI events + * @ids: index where to put the event in the array + * @event: event to add to the aray + * + * boottime->wait_for_event() takes an array of events as input. * Provide a helper to set it up correctly for mixed mode. 
*/ static inline @@ -771,4 +777,6 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image, unsigned long soft_limit, unsigned long hard_limit); +void efi_handle_post_ebs_state(void); + #endif diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c index 2005e33b33d5..630caa6b1f4c 100644 --- a/drivers/firmware/efi/libstub/file.c +++ b/drivers/firmware/efi/libstub/file.c @@ -102,12 +102,20 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len, if (!found) return 0; + /* Skip any leading slashes */ + while (cmdline[i] == L'/' || cmdline[i] == L'\\') + i++; + while (--result_len > 0 && i < cmdline_len) { - if (cmdline[i] == L'\0' || - cmdline[i] == L'\n' || - cmdline[i] == L' ') + efi_char16_t c = cmdline[i++]; + + if (c == L'\0' || c == L'\n' || c == L' ') break; - *result++ = cmdline[i++]; + else if (c == L'/') + /* Replace UNIX dir separators with EFI standard ones */ + *result++ = L'\\'; + else + *result++ = c; } *result = L'\0'; return i; diff --git a/drivers/firmware/efi/libstub/skip_spaces.c b/drivers/firmware/efi/libstub/skip_spaces.c index a700b3c7f7d0..159fb4e456c6 100644 --- a/drivers/firmware/efi/libstub/skip_spaces.c +++ b/drivers/firmware/efi/libstub/skip_spaces.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include char *skip_spaces(const char *str) diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c index fd7f0fbec07e..d17e4d6ac9bc 100644 --- a/drivers/firmware/google/memconsole-coreboot.c +++ b/drivers/firmware/google/memconsole-coreboot.c @@ -21,7 +21,7 @@ struct cbmem_cons { u32 size_dont_access_after_boot; u32 cursor; - u8 body[0]; + u8 body[]; } __packed; #define CURSOR_MASK ((1 << 28) - 1) diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c index db0812263d46..d23c5c69ab52 100644 --- a/drivers/firmware/google/vpd.c +++ b/drivers/firmware/google/vpd.c @@ -32,7 +32,7 @@ struct vpd_cbmem { u32 version; u32 ro_size; u32 rw_size; - u8 blob[0]; + u8 blob[]; }; struct vpd_section { diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 96758b71a8db..7127a04bca19 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -104,7 +104,7 @@ struct ibft_control { u16 tgt0_off; u16 nic1_off; u16 tgt1_off; - u16 expansion[0]; + u16 expansion[]; } __attribute__((__packed__)); struct ibft_initiator { diff --git a/drivers/firmware/pcdp.h b/drivers/firmware/pcdp.h index ce75d1da9e84..e02540571c52 100644 --- a/drivers/firmware/pcdp.h +++ b/drivers/firmware/pcdp.h @@ -103,6 +103,6 @@ struct pcdp { u8 creator_id[4]; u32 creator_rev; u32 num_uarts; - struct pcdp_uart uart[0]; /* actual size is num_uarts */ + struct pcdp_uart uart[]; /* actual size is num_uarts */ /* remainder of table is pcdp_device structures */ } __attribute__((packed)); diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index ef8098856a47..625c8fdceabf 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -181,6 +181,7 @@ EXPORT_SYMBOL_GPL(rpi_firmware_property); static void rpi_firmware_print_firmware_revision(struct rpi_firmware *fw) { + time64_t date_and_time; u32 packet; int ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_FIRMWARE_REVISION, @@ -189,7 +190,9 @@ rpi_firmware_print_firmware_revision(struct rpi_firmware *fw) if (ret) return; - dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &packet); + /* This is not compatible with y2038 */ + 
date_and_time = packet; + dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &date_and_time); } static void diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index b2408a710662..7cd5a29fc437 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig @@ -208,7 +208,7 @@ config FPGA_DFL_PCI config FPGA_MGR_ZYNQMP_FPGA tristate "Xilinx ZynqMP FPGA" - depends on ARCH_ZYNQMP || COMPILE_TEST + depends on ZYNQMP_FIRMWARE || (!ZYNQMP_FIRMWARE && COMPILE_TEST) help FPGA manager driver support for Xilinx ZynqMP FPGAs. This driver uses the processor configuration port(PCAP) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 775e389c9a13..16596a9ccabe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -696,7 +696,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, * default power levels, write "r" (reset) to the file to reset them. * * - * < For Vega20 > + * < For Vega20 and newer ASICs > * * Reading the file will display: * @@ -1668,7 +1668,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, } /** - * DOC: busy_percent + * DOC: gpu_busy_percent * * The amdgpu driver provides a sysfs API for reading how busy the GPU * is as a percentage. The file gpu_busy_percent is used for this. diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index b544baf306f2..5d71c23e2640 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -1298,8 +1298,12 @@ static int sdma_v5_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; - for (i = 0; i < adev->sdma.num_instances; i++) + for (i = 0; i < adev->sdma.num_instances; i++) { + if (adev->sdma.instance[i].fw != NULL) + release_firmware(adev->sdma.instance[i].fw); + amdgpu_ring_fini(&adev->sdma.instance[i].ring); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index f0587d94294d..fee60921fccf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -1076,7 +1077,7 @@ static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd) #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) struct drm_device *ddev = kfd->ddev; - return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major, + return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR, ddev->render->index, DEVCG_ACC_WRITE | DEVCG_ACC_READ); #else diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index d27221ddcdeb..0e0c42e9f6a3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -428,6 +428,7 @@ struct kfd_process *kfd_create_process(struct file *filep) (int)process->lead_thread->pid); if (ret) { pr_warn("Creating procfs pid directory failed"); + kobject_put(process->kobj); goto out; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7ced9f87be97..10ac8076d4f2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5024,7 +5024,8 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, struct drm_connector *connector = &aconnector->base; struct amdgpu_device *adev = connector->dev->dev_private; struct 
dc_stream_state *stream; - int requested_bpc = connector->state ? connector->state->max_requested_bpc : 8; + const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; + int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8; enum dc_status dc_result = DC_OK; do { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 076af267b488..1d692f4f42f3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1058,7 +1058,6 @@ static const struct { {"link_settings", &dp_link_settings_debugfs_fops}, {"phy_settings", &dp_phy_settings_debugfs_fop}, {"test_pattern", &dp_phy_test_pattern_fops}, - {"output_bpc", &output_bpc_fops}, {"vrr_range", &vrr_range_fops}, #ifdef CONFIG_DRM_AMD_DC_HDCP {"hdcp_sink_capability", &hdcp_sink_capability_fops}, @@ -1142,6 +1141,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector, &force_yuv420_output_fops); + debugfs_create_file("output_bpc", 0644, dir, connector, + &output_bpc_fops); + connector->debugfs_dpcd_address = 0; connector->debugfs_dpcd_size = 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index dcf84a61de37..949d10ef8304 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -510,8 +510,10 @@ static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size); - if (!srm) - return -EINVAL; + if (!srm) { + ret = -EINVAL; + goto ret; + } if (pos >= srm_size) ret = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index 3f66868df171..ea29cf95d470 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -28,8 +28,6 @@ endif endif CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc_dpi.o := $(dsc_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dsc/dc_dsc.o := $(dsc_ccflags) DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 0ea6662a1563..0c7f247bb7de 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -22,10 +22,12 @@ * Author: AMD */ +#include #include "dc_hw_types.h" #include "dsc.h" #include #include "dc.h" +#include "rc_calc.h" /* This module's internal functions */ @@ -304,22 +306,6 @@ static inline uint32_t dsc_div_by_10_round_up(uint32_t value) return (value + 9) / 10; } -static inline uint32_t calc_dsc_bpp_x16(uint32_t stream_bandwidth_kbps, uint32_t pix_clk_100hz, uint32_t bpp_increment_div) -{ - uint32_t dsc_target_bpp_x16; - float f_dsc_target_bpp; - float f_stream_bandwidth_100bps = stream_bandwidth_kbps * 10.0f; - uint32_t precision = bpp_increment_div; // bpp_increment_div is actually precision - - f_dsc_target_bpp = f_stream_bandwidth_100bps / pix_clk_100hz; - - // Round down to the nearest precision stop to bring it into DSC spec range - dsc_target_bpp_x16 = (uint32_t)(f_dsc_target_bpp * precision); - dsc_target_bpp_x16 = (dsc_target_bpp_x16 * 16) / precision; - - return dsc_target_bpp_x16; -} - /* Get DSC bandwidth range based on [min_bpp, 
max_bpp] target bitrate range, and timing's pixel clock * and uncompressed bandwidth. */ diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index 03ae15946c6d..667afbc260f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -23,6 +23,7 @@ * Authors: AMD * */ +#include #include "os_types.h" #include "rc_calc.h" @@ -40,7 +41,8 @@ break -void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, enum max_min max_min, float bpp) +static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, + enum max_min max_min, float bpp) { int mode = MODE_SELECT(444, 422, 420); int sel = table_hash(mode, bpc, max_min); @@ -85,7 +87,7 @@ void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, enum ma memcpy(qps, table[index].qps, sizeof(qp_set)); } -double dsc_roundf(double num) +static double dsc_roundf(double num) { if (num < 0.0) num = num - 0.5; @@ -95,7 +97,7 @@ double dsc_roundf(double num) return (int)(num); } -double dsc_ceil(double num) +static double dsc_ceil(double num) { double retval = (int)num; @@ -105,7 +107,7 @@ double dsc_ceil(double num) return (int)retval; } -void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp) +static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp) { int *p = ofs; @@ -160,7 +162,7 @@ void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp) } } -int median3(int a, int b, int c) +static int median3(int a, int b, int c) { if (a > b) swap(a, b); @@ -172,13 +174,25 @@ int median3(int a, int b, int c) return b; } -void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_comp bpc, float bpp, int slice_width, int slice_height, int minor_version) +static void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm, + enum bits_per_comp bpc, u8 drm_bpp, + bool is_navite_422_or_420, + int slice_width, int slice_height, + int minor_version) { + float bpp; float bpp_group; float initial_xmit_delay_factor; int padding_pixels; int i; + bpp = ((float)drm_bpp / 16.0); + /* in native_422 or native_420 modes, the bits_per_pixel is double the + * target bpp (the latter is what calc_rc_params expects) + */ + if (is_navite_422_or_420) + bpp /= 2.0; + rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); @@ -251,3 +265,128 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com rc->rc_buf_thresh[13] = 8064; } +static u32 _do_bytes_per_pixel_calc(int slice_width, u8 drm_bpp, + bool is_navite_422_or_420) +{ + float bpp; + u32 bytes_per_pixel; + double d_bytes_per_pixel; + + bpp = ((float)drm_bpp / 16.0); + d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width; + // TODO: Make sure the formula for calculating this is precise (ceiling + // vs. 
floor, and at what point they should be applied) + if (is_navite_422_or_420) + d_bytes_per_pixel /= 2; + + bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000); + + return bytes_per_pixel; +} + +static u32 _do_calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz, + u32 bpp_increment_div) +{ + u32 dsc_target_bpp_x16; + float f_dsc_target_bpp; + float f_stream_bandwidth_100bps; + // bpp_increment_div is actually precision + u32 precision = bpp_increment_div; + + f_stream_bandwidth_100bps = stream_bandwidth_kbps * 10.0f; + f_dsc_target_bpp = f_stream_bandwidth_100bps / pix_clk_100hz; + + // Round down to the nearest precision stop to bring it into DSC spec + // range + dsc_target_bpp_x16 = (u32)(f_dsc_target_bpp * precision); + dsc_target_bpp_x16 = (dsc_target_bpp_x16 * 16) / precision; + + return dsc_target_bpp_x16; +} + +/** + * calc_rc_params - reads the user's cmdline mode + * @rc: DC internal DSC parameters + * @pps: DRM struct with all required DSC values + * + * This function expects a drm_dsc_config data struct with all the required DSC + * values previously filled out by our driver and based on this information it + * computes some of the DSC values. + * + * @note This calculation requires float point operation, most of it executes + * under kernel_fpu_{begin,end}. + */ +void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps) +{ + enum colour_mode mode; + enum bits_per_comp bpc; + bool is_navite_422_or_420; + u8 drm_bpp = pps->bits_per_pixel; + int slice_width = pps->slice_width; + int slice_height = pps->slice_height; + + mode = pps->convert_rgb ? CM_RGB : (pps->simple_422 ? CM_444 : + (pps->native_422 ? CM_422 : + pps->native_420 ? CM_420 : CM_444)); + bpc = (pps->bits_per_component == 8) ? BPC_8 : (pps->bits_per_component == 10) + ? BPC_10 : BPC_12; + + is_navite_422_or_420 = pps->native_422 || pps->native_420; + + DC_FP_START(); + _do_calc_rc_params(rc, mode, bpc, drm_bpp, is_navite_422_or_420, + slice_width, slice_height, + pps->dsc_version_minor); + DC_FP_END(); +} + +/** + * calc_dsc_bytes_per_pixel - calculate bytes per pixel + * @pps: DRM struct with all required DSC values + * + * Based on the information inside drm_dsc_config, this function calculates the + * total of bytes per pixel. + * + * @note This calculation requires float point operation, most of it executes + * under kernel_fpu_{begin,end}. + * + * Return: + * Return the number of bytes per pixel + */ +u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps) + +{ + u32 ret; + u8 drm_bpp = pps->bits_per_pixel; + int slice_width = pps->slice_width; + bool is_navite_422_or_420 = pps->native_422 || pps->native_420; + + DC_FP_START(); + ret = _do_bytes_per_pixel_calc(slice_width, drm_bpp, + is_navite_422_or_420); + DC_FP_END(); + return ret; +} + +/** + * calc_dsc_bpp_x16 - retrieve the dsc bits per pixel + * @stream_bandwidth_kbps: + * @pix_clk_100hz: + * @bpp_increment_div: + * + * Calculate the total of bits per pixel for DSC configuration. + * + * @note This calculation requires float point operation, most of it executes + * under kernel_fpu_{begin,end}. 
+ */ +u32 calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz, + u32 bpp_increment_div) +{ + u32 dsc_bpp; + + DC_FP_START(); + dsc_bpp = _do_calc_dsc_bpp_x16(stream_bandwidth_kbps, pix_clk_100hz, + bpp_increment_div); + DC_FP_END(); + return dsc_bpp; +} diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h index b6b1f09c2009..21723fa6561e 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h @@ -77,7 +77,10 @@ struct qp_entry { typedef struct qp_entry qp_table[]; -void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_comp bpc, float bpp, int slice_width, int slice_height, int minor_version); +void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps); +u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps); +u32 calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz, + u32 bpp_increment_div); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index 1f6e63b71456..ef830aded5b1 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -27,8 +27,6 @@ #include "dscc_types.h" #include "rc_calc.h" -double dsc_ceil(double num); - static void copy_pps_fields(struct drm_dsc_config *to, const struct drm_dsc_config *from) { to->line_buf_depth = from->line_buf_depth; @@ -100,34 +98,13 @@ static void copy_rc_to_cfg(struct drm_dsc_config *dsc_cfg, const struct rc_param int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_parameters *dsc_params) { - enum colour_mode mode = pps->convert_rgb ? CM_RGB : - (pps->simple_422 ? CM_444 : - (pps->native_422 ? CM_422 : - pps->native_420 ? CM_420 : CM_444)); - enum bits_per_comp bpc = (pps->bits_per_component == 8) ? BPC_8 : - (pps->bits_per_component == 10) ? BPC_10 : BPC_12; - float bpp = ((float) pps->bits_per_pixel / 16.0); - int slice_width = pps->slice_width; - int slice_height = pps->slice_height; int ret; struct rc_params rc; struct drm_dsc_config dsc_cfg; - double d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width; + dsc_params->bytes_per_pixel = calc_dsc_bytes_per_pixel(pps); - // TODO: Make sure the formula for calculating this is precise (ceiling vs. 
floor, and at what point they should be applied) - if (pps->native_422 || pps->native_420) - d_bytes_per_pixel /= 2; - - dsc_params->bytes_per_pixel = (uint32_t)dsc_ceil(d_bytes_per_pixel * 0x10000000); - - /* in native_422 or native_420 modes, the bits_per_pixel is double the target bpp - * (the latter is what calc_rc_params expects) - */ - if (pps->native_422 || pps->native_420) - bpp /= 2.0; - - calc_rc_params(&rc, mode, bpc, bpp, slice_width, slice_height, pps->dsc_version_minor); + calc_rc_params(&rc, pps); dsc_params->pps = *pps; dsc_params->pps.initial_scale_value = 8 * rc.rc_model_size / (rc.rc_model_size - rc.initial_fullness_offset); diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 9431b48aecb4..bcfe34ef8c28 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -843,7 +843,7 @@ static bool build_regamma(struct pwl_float_data_ex *rgb_regamma, pow_buffer_ptr = -1; // reset back to no optimize ret = true; release: - kfree(coeff); + kvfree(coeff); return ret; } @@ -1777,7 +1777,7 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf, kfree(rgb_regamma); rgb_regamma_alloc_fail: - kvfree(rgb_user); + kfree(rgb_user); rgb_user_alloc_fail: return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 85e5b1ed22c2..56923a96b450 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -239,7 +239,7 @@ static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) switch (dev_id) { case 0x67BA: - case 0x66B1: + case 0x67B1: smu_data->power_tune_defaults = &defaults_hawaii_pro; break; case 0x67B8: diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c index cf804389f5ec..e464429d32df 100644 --- a/drivers/gpu/drm/drm_encoder_slave.c +++ b/drivers/gpu/drm/drm_encoder_slave.c @@ -61,13 +61,8 @@ int drm_i2c_encoder_init(struct drm_device *dev, request_module("%s%s", I2C_MODULE_PREFIX, info->type); - client = i2c_new_device(adap, info); - if (!client) { - err = -ENOMEM; - goto fail; - } - - if (!client->dev.driver) { + client = i2c_new_client_device(adap, info); + if (!i2c_client_has_driver(client)) { err = -ENODEV; goto fail_unregister; } @@ -84,7 +79,7 @@ int drm_i2c_encoder_init(struct drm_device *dev, err = encoder_drv->encoder_init(client, dev, encoder); if (err) - goto fail_unregister; + goto fail_module_put; if (info->platform_data) encoder->slave_funcs->set_config(&encoder->base, @@ -92,10 +87,10 @@ int drm_i2c_encoder_init(struct drm_device *dev, return 0; +fail_module_put: + module_put(module); fail_unregister: i2c_unregister_device(client); - module_put(module); -fail: return err; } EXPORT_SYMBOL(drm_i2c_encoder_init); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 170aa7689110..5609e164805f 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -227,6 +227,42 @@ int drm_fb_helper_debug_leave(struct fb_info *info) } EXPORT_SYMBOL(drm_fb_helper_debug_leave); +static int +__drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, + bool force) +{ + bool do_delayed; + int ret; + + if (!drm_fbdev_emulation || !fb_helper) + return -ENODEV; + + if (READ_ONCE(fb_helper->deferred_setup)) + return 0; + + mutex_lock(&fb_helper->lock); + if (force) { + /* + * Yes 
this is the _locked version which expects the master lock + * to be held. But for forced restores we're intentionally + * racing here, see drm_fb_helper_set_par(). + */ + ret = drm_client_modeset_commit_locked(&fb_helper->client); + } else { + ret = drm_client_modeset_commit(&fb_helper->client); + } + + do_delayed = fb_helper->delayed_hotplug; + if (do_delayed) + fb_helper->delayed_hotplug = false; + mutex_unlock(&fb_helper->lock); + + if (do_delayed) + drm_fb_helper_hotplug_event(fb_helper); + + return ret; +} + /** * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration * @fb_helper: driver-allocated fbdev helper, can be NULL @@ -240,27 +276,7 @@ EXPORT_SYMBOL(drm_fb_helper_debug_leave); */ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) { - bool do_delayed; - int ret; - - if (!drm_fbdev_emulation || !fb_helper) - return -ENODEV; - - if (READ_ONCE(fb_helper->deferred_setup)) - return 0; - - mutex_lock(&fb_helper->lock); - ret = drm_client_modeset_commit(&fb_helper->client); - - do_delayed = fb_helper->delayed_hotplug; - if (do_delayed) - fb_helper->delayed_hotplug = false; - mutex_unlock(&fb_helper->lock); - - if (do_delayed) - drm_fb_helper_hotplug_event(fb_helper); - - return ret; + return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false); } EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); @@ -1318,6 +1334,7 @@ int drm_fb_helper_set_par(struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct fb_var_screeninfo *var = &info->var; + bool force; if (oops_in_progress) return -EBUSY; @@ -1327,7 +1344,25 @@ int drm_fb_helper_set_par(struct fb_info *info) return -EINVAL; } - drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); + /* + * Normally we want to make sure that a kms master takes precedence over + * fbdev, to avoid fbdev flickering and occasionally stealing the + * display status. But Xorg first sets the vt back to text mode using + * the KDSET IOCTL with KD_TEXT, and only after that drops the master + * status when exiting. + * + * In the past this was caught by drm_fb_helper_lastclose(), but on + * modern systems where logind always keeps a drm fd open to orchestrate + * the vt switching, this doesn't work. + * + * To not break the userspace ABI we have this special case here, which + * is only used for the above case. Everything else uses the normal + * commit function, which ensures that we never steal the display from + * an active drm master. 
+ */ + force = var->activate & FB_ACTIVATE_KD_TEXT; + + __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force); return 0; } diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index ffd95bfeaa94..d00ea384dcbf 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data { int orientation; }; -static const struct drm_dmi_panel_orientation_data acer_s1003 = { - .width = 800, - .height = 1280, - .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, -}; - static const struct drm_dmi_panel_orientation_data asus_t100ha = { .width = 800, .height = 1280, @@ -114,13 +108,19 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), }, - .driver_data = (void *)&acer_s1003, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* Asus T100HA */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"), }, .driver_data = (void *)&asus_t100ha, + }, { /* Asus T101HA */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* GPD MicroPC (generic strings, also match on bios date) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index aa22465bb56e..0575a1eea2a1 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2579,14 +2579,14 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, static void tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock, - u32 level) + u32 level, enum intel_output_type type) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations; u32 n_entries, val, ln, dpcnt_mask, dpcnt_val; - if (encoder->type == INTEL_OUTPUT_HDMI) { + if (type == INTEL_OUTPUT_HDMI) { n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans); ddi_translations = tgl_dkl_phy_hdmi_ddi_trans; } else { @@ -2638,7 +2638,7 @@ static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder, if (intel_phy_is_combo(dev_priv, phy)) icl_combo_phy_ddi_vswing_sequence(encoder, level, type); else - tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level); + tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level, type); } static u32 translate_signal_level(struct intel_dp *intel_dp, int signal_levels) @@ -2987,7 +2987,7 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port, ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port)); } - ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X1_MODE); + ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); /* DPPATC */ @@ -3472,7 +3472,9 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, INTEL_OUTPUT_DP_MST); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - intel_dp_set_infoframes(encoder, false, old_crtc_state, old_conn_state); + if (!is_mst) + intel_dp_set_infoframes(encoder, false, + old_crtc_state, old_conn_state); /* * Power down sink before disabling the port, otherwise we 
end diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index d18b406f2a7d..f29e51ce489c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -397,6 +397,14 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, */ drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, false); + + /* + * BSpec 4287: disable DIP after the transcoder is disabled and before + * the transcoder clock select is set to none. + */ + if (last_mst_stream) + intel_dp_set_infoframes(&intel_dig_port->base, false, + old_crtc_state, NULL); /* * From TGL spec: "If multi-stream slave transcoder: Configure * Transcoder Clock Select to direct no clock to the transcoder" diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index da5b61085257..8691eb61e185 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -646,7 +646,7 @@ static int engine_setup_common(struct intel_engine_cs *engine) struct measure_breadcrumb { struct i915_request rq; struct intel_ring ring; - u32 cs[1024]; + u32 cs[2048]; }; static int measure_breadcrumb_dw(struct intel_context *ce) @@ -668,6 +668,8 @@ static int measure_breadcrumb_dw(struct intel_context *ce) frame->ring.vaddr = frame->cs; frame->ring.size = sizeof(frame->cs); + frame->ring.wrap = + BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size); frame->ring.effective_size = frame->ring.size; intel_ring_update_space(&frame->ring); frame->rq.ring = &frame->ring; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 87e6c5bdd2dc..7c3d8ef4a47c 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1134,6 +1134,13 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) list_move(&rq->sched.link, pl); set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); + /* Check in case we rollback so far we wrap [size/2] */ + if (intel_ring_direction(rq->ring, + intel_ring_wrap(rq->ring, + rq->tail), + rq->ring->tail) > 0) + rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE; + active = rq; } else { struct intel_engine_cs *owner = rq->context->engine; @@ -1498,8 +1505,9 @@ static u64 execlists_update_context(struct i915_request *rq) * HW has a tendency to ignore us rewinding the TAIL to the end of * an earlier request. 
*/ + GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail); + prev = rq->ring->tail; tail = intel_ring_set_tail(rq->ring, rq->tail); - prev = ce->lrc_reg_state[CTX_RING_TAIL]; if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0)) desc |= CTX_DESC_FORCE_RESTORE; ce->lrc_reg_state[CTX_RING_TAIL] = tail; @@ -1895,7 +1903,8 @@ static void defer_active(struct intel_engine_cs *engine) static bool need_timeslice(const struct intel_engine_cs *engine, - const struct i915_request *rq) + const struct i915_request *rq, + const struct rb_node *rb) { int hint; @@ -1903,9 +1912,28 @@ need_timeslice(const struct intel_engine_cs *engine, return false; hint = engine->execlists.queue_priority_hint; + + if (rb) { + const struct virtual_engine *ve = + rb_entry(rb, typeof(*ve), nodes[engine->id].rb); + const struct intel_engine_cs *inflight = + intel_context_inflight(&ve->context); + + if (!inflight || inflight == engine) { + struct i915_request *next; + + rcu_read_lock(); + next = READ_ONCE(ve->request); + if (next) + hint = max(hint, rq_prio(next)); + rcu_read_unlock(); + } + } + if (!list_is_last(&rq->sched.link, &engine->active.requests)) hint = max(hint, rq_prio(list_next_entry(rq, sched.link))); + GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE); return hint >= effective_prio(rq); } @@ -1977,10 +2005,9 @@ static void set_timeslice(struct intel_engine_cs *engine) set_timer_ms(&engine->execlists.timer, duration); } -static void start_timeslice(struct intel_engine_cs *engine) +static void start_timeslice(struct intel_engine_cs *engine, int prio) { struct intel_engine_execlists *execlists = &engine->execlists; - const int prio = queue_prio(execlists); unsigned long duration; if (!intel_engine_has_timeslices(engine)) @@ -2140,7 +2167,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) __unwind_incomplete_requests(engine); last = NULL; - } else if (need_timeslice(engine, last) && + } else if (need_timeslice(engine, last, rb) && timeslice_expired(execlists, last)) { if (i915_request_completed(last)) { tasklet_hi_schedule(&execlists->tasklet); @@ -2188,7 +2215,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * Even if ELSP[1] is occupied and not worthy * of timeslices, our queue might be. */ - start_timeslice(engine); + start_timeslice(engine, queue_prio(execlists)); return; } } @@ -2223,7 +2250,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) if (last && !can_merge_rq(last, rq)) { spin_unlock(&ve->base.active.lock); - start_timeslice(engine); + start_timeslice(engine, rq_prio(rq)); return; /* leave this for another sibling */ } @@ -4739,6 +4766,14 @@ static int gen12_emit_flush(struct i915_request *request, u32 mode) return 0; } +static void assert_request_valid(struct i915_request *rq) +{ + struct intel_ring *ring __maybe_unused = rq->ring; + + /* Can we unwind this request without appearing to go forwards? 
*/ + GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0); +} + /* * Reserve space for 2 NOOPs at the end of each request to be * used as a workaround for not being allowed to do lite @@ -4751,6 +4786,9 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs) *cs++ = MI_NOOP; request->wa_tail = intel_ring_offset(request, cs); + /* Check that entire request is less than half the ring */ + assert_request_valid(request); + return cs; } diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c index 8cda1b7e17ba..bdb324167ef3 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring.c +++ b/drivers/gpu/drm/i915/gt/intel_ring.c @@ -315,3 +315,7 @@ int intel_ring_cacheline_align(struct i915_request *rq) GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1)); return 0; } + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_ring.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 90a2b9e399b0..85d2bef51524 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -178,6 +178,12 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set) wa_write_masked_or(wal, reg, set, set); } +static void +wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr) +{ + wa_write_masked_or(wal, reg, clr, 0); +} + static void wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val) { @@ -686,6 +692,227 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq) return 0; } +static void +gen4_gt_workarounds_init(struct drm_i915_private *i915, + struct i915_wa_list *wal) +{ + /* WaDisable_RenderCache_OperationalFlush:gen4,ilk */ + wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE); +} + +static void +g4x_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + gen4_gt_workarounds_init(i915, wal); + + /* WaDisableRenderCachePipelinedFlush:g4x,ilk */ + wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE); +} + +static void +ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + g4x_gt_workarounds_init(i915, wal); + + wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED); +} + +static void +snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + /* WaDisableHiZPlanesWhenMSAAEnabled:snb */ + wa_masked_en(wal, + _3D_CHICKEN, + _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB); + + /* WaDisable_RenderCache_OperationalFlush:snb */ + wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE); + + /* + * BSpec recommends 8x4 when MSAA is used, + * however in practice 16x4 seems fastest. + * + * Note that PS/WM thread counts depend on the WIZ hashing + * disable bit, which we don't touch here, but it's good + * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). + */ + wa_add(wal, + GEN6_GT_MODE, 0, + _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4), + GEN6_WIZ_HASHING_16x4); + + wa_masked_dis(wal, CACHE_MODE_0, CM0_STC_EVICT_DISABLE_LRA_SNB); + + wa_masked_en(wal, + _3D_CHICKEN3, + /* WaStripsFansDisableFastClipPerformanceFix:snb */ + _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL | + /* + * Bspec says: + * "This bit must be set if 3DSTATE_CLIP clip mode is set + * to normal and 3DSTATE_SF number of SF output attributes + * is more than 16." 
+ */ + _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH); +} + +static void +ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + /* WaDisableEarlyCull:ivb */ + wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL); + + /* WaDisablePSDDualDispatchEnable:ivb */ + if (IS_IVB_GT1(i915)) + wa_masked_en(wal, + GEN7_HALF_SLICE_CHICKEN1, + GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE); + + /* WaDisable_RenderCache_OperationalFlush:ivb */ + wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE); + + /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */ + wa_masked_dis(wal, + GEN7_COMMON_SLICE_CHICKEN1, + GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); + + /* WaApplyL3ControlAndL3ChickenMode:ivb */ + wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL); + wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); + + /* WaForceL3Serialization:ivb */ + wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE); + + /* + * WaVSThreadDispatchOverride:ivb,vlv + * + * This actually overrides the dispatch + * mode for all thread types. + */ + wa_write_masked_or(wal, GEN7_FF_THREAD_MODE, + GEN7_FF_SCHED_MASK, + GEN7_FF_TS_SCHED_HW | + GEN7_FF_VS_SCHED_HW | + GEN7_FF_DS_SCHED_HW); + + if (0) { /* causes HiZ corruption on ivb:gt1 */ + /* enable HiZ Raw Stall Optimization */ + wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE); + } + + /* WaDisable4x2SubspanOptimization:ivb */ + wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); + + /* + * BSpec recommends 8x4 when MSAA is used, + * however in practice 16x4 seems fastest. + * + * Note that PS/WM thread counts depend on the WIZ hashing + * disable bit, which we don't touch here, but it's good + * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). + */ + wa_add(wal, GEN7_GT_MODE, 0, + _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4), + GEN6_WIZ_HASHING_16x4); +} + +static void +vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + /* WaDisableEarlyCull:vlv */ + wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL); + + /* WaPsdDispatchEnable:vlv */ + /* WaDisablePSDDualDispatchEnable:vlv */ + wa_masked_en(wal, + GEN7_HALF_SLICE_CHICKEN1, + GEN7_MAX_PS_THREAD_DEP | + GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE); + + /* WaDisable_RenderCache_OperationalFlush:vlv */ + wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE); + + /* WaForceL3Serialization:vlv */ + wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE); + + /* + * WaVSThreadDispatchOverride:ivb,vlv + * + * This actually overrides the dispatch + * mode for all thread types. + */ + wa_write_masked_or(wal, + GEN7_FF_THREAD_MODE, + GEN7_FF_SCHED_MASK, + GEN7_FF_TS_SCHED_HW | + GEN7_FF_VS_SCHED_HW | + GEN7_FF_DS_SCHED_HW); + + /* + * BSpec says this must be set, even though + * WaDisable4x2SubspanOptimization isn't listed for VLV. + */ + wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); + + /* + * BSpec recommends 8x4 when MSAA is used, + * however in practice 16x4 seems fastest. + * + * Note that PS/WM thread counts depend on the WIZ hashing + * disable bit, which we don't touch here, but it's good + * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). + */ + wa_add(wal, GEN7_GT_MODE, 0, + _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4), + GEN6_WIZ_HASHING_16x4); + + /* + * WaIncreaseL3CreditsForVLVB0:vlv + * This is the hardware default actually. 
+ */ + wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE); +} + +static void +hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + /* L3 caching of data atomics doesn't work -- disable it. */ + wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); + + wa_add(wal, + HSW_ROW_CHICKEN3, 0, + _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE), + 0 /* XXX does this reg exist? */); + + /* WaVSRefCountFullforceMissDisable:hsw */ + wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME); + + wa_masked_dis(wal, + CACHE_MODE_0_GEN7, + /* WaDisable_RenderCache_OperationalFlush:hsw */ + RC_OP_FLUSH_ENABLE | + /* enable HiZ Raw Stall Optimization */ + HIZ_RAW_STALL_OPT_DISABLE); + + /* WaDisable4x2SubspanOptimization:hsw */ + wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); + + /* + * BSpec recommends 8x4 when MSAA is used, + * however in practice 16x4 seems fastest. + * + * Note that PS/WM thread counts depend on the WIZ hashing + * disable bit, which we don't touch here, but it's good + * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). + */ + wa_add(wal, GEN7_GT_MODE, 0, + _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4), + GEN6_WIZ_HASHING_16x4); + + /* WaSampleCChickenBitEnable:hsw */ + wa_masked_en(wal, HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE); +} + static void gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) { @@ -963,6 +1190,20 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal) bxt_gt_workarounds_init(i915, wal); else if (IS_SKYLAKE(i915)) skl_gt_workarounds_init(i915, wal); + else if (IS_HASWELL(i915)) + hsw_gt_workarounds_init(i915, wal); + else if (IS_VALLEYVIEW(i915)) + vlv_gt_workarounds_init(i915, wal); + else if (IS_IVYBRIDGE(i915)) + ivb_gt_workarounds_init(i915, wal); + else if (IS_GEN(i915, 6)) + snb_gt_workarounds_init(i915, wal); + else if (IS_GEN(i915, 5)) + ilk_gt_workarounds_init(i915, wal); + else if (IS_G4X(i915)) + g4x_gt_workarounds_init(i915, wal); + else if (IS_GEN(i915, 4)) + gen4_gt_workarounds_init(i915, wal); else if (INTEL_GEN(i915) <= 8) return; else diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 2b2efff6e19d..4aa4cc917d8b 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -310,22 +310,20 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq) 1000)); } -static void engine_heartbeat_disable(struct intel_engine_cs *engine, - unsigned long *saved) +static void engine_heartbeat_disable(struct intel_engine_cs *engine) { - *saved = engine->props.heartbeat_interval_ms; engine->props.heartbeat_interval_ms = 0; intel_engine_pm_get(engine); intel_engine_park_heartbeat(engine); } -static void engine_heartbeat_enable(struct intel_engine_cs *engine, - unsigned long saved) +static void engine_heartbeat_enable(struct intel_engine_cs *engine) { intel_engine_pm_put(engine); - engine->props.heartbeat_interval_ms = saved; + engine->props.heartbeat_interval_ms = + engine->defaults.heartbeat_interval_ms; } static int igt_hang_sanitycheck(void *arg) @@ -473,7 +471,6 @@ static int igt_reset_nop_engine(void *arg) for_each_engine(engine, gt, id) { unsigned int reset_count, reset_engine_count, count; struct intel_context *ce; - unsigned long heartbeat; IGT_TIMEOUT(end_time); int err; @@ -485,7 +482,7 @@ static int igt_reset_nop_engine(void *arg) reset_engine_count = 
i915_reset_engine_count(global, engine); count = 0; - engine_heartbeat_disable(engine, &heartbeat); + engine_heartbeat_disable(engine); set_bit(I915_RESET_ENGINE + id, >->reset.flags); do { int i; @@ -529,7 +526,7 @@ static int igt_reset_nop_engine(void *arg) } } while (time_before(jiffies, end_time)); clear_bit(I915_RESET_ENGINE + id, >->reset.flags); - engine_heartbeat_enable(engine, heartbeat); + engine_heartbeat_enable(engine); pr_info("%s(%s): %d resets\n", __func__, engine->name, count); @@ -564,7 +561,6 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) for_each_engine(engine, gt, id) { unsigned int reset_count, reset_engine_count; - unsigned long heartbeat; IGT_TIMEOUT(end_time); if (active && !intel_engine_can_store_dword(engine)) @@ -580,7 +576,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) reset_count = i915_reset_count(global); reset_engine_count = i915_reset_engine_count(global, engine); - engine_heartbeat_disable(engine, &heartbeat); + engine_heartbeat_disable(engine); set_bit(I915_RESET_ENGINE + id, >->reset.flags); do { if (active) { @@ -632,7 +628,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) } } while (time_before(jiffies, end_time)); clear_bit(I915_RESET_ENGINE + id, >->reset.flags); - engine_heartbeat_enable(engine, heartbeat); + engine_heartbeat_enable(engine); if (err) break; @@ -789,7 +785,6 @@ static int __igt_reset_engines(struct intel_gt *gt, struct active_engine threads[I915_NUM_ENGINES] = {}; unsigned long device = i915_reset_count(global); unsigned long count = 0, reported; - unsigned long heartbeat; IGT_TIMEOUT(end_time); if (flags & TEST_ACTIVE && @@ -832,7 +827,7 @@ static int __igt_reset_engines(struct intel_gt *gt, yield(); /* start all threads before we begin */ - engine_heartbeat_disable(engine, &heartbeat); + engine_heartbeat_disable(engine); set_bit(I915_RESET_ENGINE + id, >->reset.flags); do { struct i915_request *rq = NULL; @@ -906,7 +901,7 @@ static int __igt_reset_engines(struct intel_gt *gt, } } while (time_before(jiffies, end_time)); clear_bit(I915_RESET_ENGINE + id, >->reset.flags); - engine_heartbeat_enable(engine, heartbeat); + engine_heartbeat_enable(engine); pr_info("i915_reset_engine(%s:%s): %lu resets\n", engine->name, test_name, count); diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 824f99c4cc7c..924bc01ef526 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -51,22 +51,20 @@ static struct i915_vma *create_scratch(struct intel_gt *gt) return vma; } -static void engine_heartbeat_disable(struct intel_engine_cs *engine, - unsigned long *saved) +static void engine_heartbeat_disable(struct intel_engine_cs *engine) { - *saved = engine->props.heartbeat_interval_ms; engine->props.heartbeat_interval_ms = 0; intel_engine_pm_get(engine); intel_engine_park_heartbeat(engine); } -static void engine_heartbeat_enable(struct intel_engine_cs *engine, - unsigned long saved) +static void engine_heartbeat_enable(struct intel_engine_cs *engine) { intel_engine_pm_put(engine); - engine->props.heartbeat_interval_ms = saved; + engine->props.heartbeat_interval_ms = + engine->defaults.heartbeat_interval_ms; } static bool is_active(struct i915_request *rq) @@ -224,7 +222,6 @@ static int live_unlite_restore(struct intel_gt *gt, int prio) struct intel_context *ce[2] = {}; struct i915_request *rq[2]; struct igt_live_test t; - unsigned long saved; int n; if (prio && !intel_engine_has_preemption(engine)) @@ 
-237,7 +234,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio) err = -EIO; break; } - engine_heartbeat_disable(engine, &saved); + engine_heartbeat_disable(engine); for (n = 0; n < ARRAY_SIZE(ce); n++) { struct intel_context *tmp; @@ -345,7 +342,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio) intel_context_put(ce[n]); } - engine_heartbeat_enable(engine, saved); + engine_heartbeat_enable(engine); if (igt_live_test_end(&t)) err = -EIO; if (err) @@ -466,7 +463,6 @@ static int live_hold_reset(void *arg) for_each_engine(engine, gt, id) { struct intel_context *ce; - unsigned long heartbeat; struct i915_request *rq; ce = intel_context_create(engine); @@ -475,7 +471,7 @@ static int live_hold_reset(void *arg) break; } - engine_heartbeat_disable(engine, &heartbeat); + engine_heartbeat_disable(engine); rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); if (IS_ERR(rq)) { @@ -535,7 +531,7 @@ static int live_hold_reset(void *arg) i915_request_put(rq); out: - engine_heartbeat_enable(engine, heartbeat); + engine_heartbeat_enable(engine); intel_context_put(ce); if (err) break; @@ -580,10 +576,9 @@ static int live_error_interrupt(void *arg) for_each_engine(engine, gt, id) { const struct error_phase *p; - unsigned long heartbeat; int err = 0; - engine_heartbeat_disable(engine, &heartbeat); + engine_heartbeat_disable(engine); for (p = phases; p->error[0] != GOOD; p++) { struct i915_request *client[ARRAY_SIZE(phases->error)]; @@ -682,7 +677,7 @@ static int live_error_interrupt(void *arg) } } - engine_heartbeat_enable(engine, heartbeat); + engine_heartbeat_enable(engine); if (err) { intel_gt_set_wedged(gt); return err; @@ -828,7 +823,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer, } } - err = release_queue(outer, vma, n, INT_MAX); + err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER); if (err) goto out; @@ -895,16 +890,14 @@ static int live_timeslice_preempt(void *arg) enum intel_engine_id id; for_each_engine(engine, gt, id) { - unsigned long saved; - if (!intel_engine_has_preemption(engine)) continue; memset(vaddr, 0, PAGE_SIZE); - engine_heartbeat_disable(engine, &saved); + engine_heartbeat_disable(engine); err = slice_semaphore_queue(engine, vma, count); - engine_heartbeat_enable(engine, saved); + engine_heartbeat_enable(engine); if (err) goto err_pin; @@ -1009,7 +1002,6 @@ static int live_timeslice_rewind(void *arg) enum { X = 1, Z, Y }; struct i915_request *rq[3] = {}; struct intel_context *ce; - unsigned long heartbeat; unsigned long timeslice; int i, err = 0; u32 *slot; @@ -1028,7 +1020,7 @@ static int live_timeslice_rewind(void *arg) * Expect execution/evaluation order XZY */ - engine_heartbeat_disable(engine, &heartbeat); + engine_heartbeat_disable(engine); timeslice = xchg(&engine->props.timeslice_duration_ms, 1); slot = memset32(engine->status_page.addr + 1000, 0, 4); @@ -1122,7 +1114,7 @@ static int live_timeslice_rewind(void *arg) wmb(); engine->props.timeslice_duration_ms = timeslice; - engine_heartbeat_enable(engine, heartbeat); + engine_heartbeat_enable(engine); for (i = 0; i < 3; i++) i915_request_put(rq[i]); if (igt_flush_test(gt->i915)) @@ -1202,12 +1194,11 @@ static int live_timeslice_queue(void *arg) .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), }; struct i915_request *rq, *nop; - unsigned long saved; if (!intel_engine_has_preemption(engine)) continue; - engine_heartbeat_disable(engine, &saved); + engine_heartbeat_disable(engine); memset(vaddr, 0, PAGE_SIZE); /* ELSP[0]: semaphore wait */ @@ -1284,7 +1275,7 @@ static int 
live_timeslice_queue(void *arg) err_rq: i915_request_put(rq); err_heartbeat: - engine_heartbeat_enable(engine, saved); + engine_heartbeat_enable(engine); if (err) break; } @@ -1298,6 +1289,121 @@ static int live_timeslice_queue(void *arg) return err; } +static int live_timeslice_nopreempt(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct igt_spinner spin; + int err = 0; + + /* + * We should not timeslice into a request that is marked with + * I915_REQUEST_NOPREEMPT. + */ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return 0; + + if (igt_spinner_init(&spin, gt)) + return -ENOMEM; + + for_each_engine(engine, gt, id) { + struct intel_context *ce; + struct i915_request *rq; + unsigned long timeslice; + + if (!intel_engine_has_preemption(engine)) + continue; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + break; + } + + engine_heartbeat_disable(engine); + timeslice = xchg(&engine->props.timeslice_duration_ms, 1); + + /* Create an unpreemptible spinner */ + + rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); + intel_context_put(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_heartbeat; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (!igt_wait_for_spinner(&spin, rq)) { + i915_request_put(rq); + err = -ETIME; + goto out_spin; + } + + set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags); + i915_request_put(rq); + + /* Followed by a maximum priority barrier (heartbeat) */ + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(rq); + goto out_spin; + } + + rq = intel_context_create_request(ce); + intel_context_put(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_spin; + } + + rq->sched.attr.priority = I915_PRIORITY_BARRIER; + i915_request_get(rq); + i915_request_add(rq); + + /* + * Wait until the barrier is in ELSP, and we know timeslicing + * will have been activated. + */ + if (wait_for_submit(engine, rq, HZ / 2)) { + i915_request_put(rq); + err = -ETIME; + goto out_spin; + } + + /* + * Since the ELSP[0] request is unpreemptible, it should not + * allow the maximum priority barrier through. Wait long + * enough to see if it is timesliced in by mistake. + */ + if (i915_request_wait(rq, 0, timeslice_threshold(engine)) >= 0) { + pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n", + engine->name); + err = -EINVAL; + } + i915_request_put(rq); + +out_spin: + igt_spinner_end(&spin); +out_heartbeat: + xchg(&engine->props.timeslice_duration_ms, timeslice); + engine_heartbeat_enable(engine); + if (err) + break; + + if (igt_flush_test(gt->i915)) { + err = -EIO; + break; + } + } + + igt_spinner_fini(&spin); + return err; +} + static int live_busywait_preempt(void *arg) { struct intel_gt *gt = arg; @@ -4153,7 +4259,6 @@ static int reset_virtual_engine(struct intel_gt *gt, { struct intel_engine_cs *engine; struct intel_context *ve; - unsigned long *heartbeat; struct igt_spinner spin; struct i915_request *rq; unsigned int n; @@ -4165,15 +4270,9 @@ static int reset_virtual_engine(struct intel_gt *gt, * descendents are not executed while the capture is in progress. 
*/ - heartbeat = kmalloc_array(nsibling, sizeof(*heartbeat), GFP_KERNEL); - if (!heartbeat) + if (igt_spinner_init(&spin, gt)) return -ENOMEM; - if (igt_spinner_init(&spin, gt)) { - err = -ENOMEM; - goto out_free; - } - ve = intel_execlists_create_virtual(siblings, nsibling); if (IS_ERR(ve)) { err = PTR_ERR(ve); @@ -4181,7 +4280,7 @@ static int reset_virtual_engine(struct intel_gt *gt, } for (n = 0; n < nsibling; n++) - engine_heartbeat_disable(siblings[n], &heartbeat[n]); + engine_heartbeat_disable(siblings[n]); rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK); if (IS_ERR(rq)) { @@ -4252,13 +4351,11 @@ static int reset_virtual_engine(struct intel_gt *gt, i915_request_put(rq); out_heartbeat: for (n = 0; n < nsibling; n++) - engine_heartbeat_enable(siblings[n], heartbeat[n]); + engine_heartbeat_enable(siblings[n]); intel_context_put(ve); out_spin: igt_spinner_fini(&spin); -out_free: - kfree(heartbeat); return err; } @@ -4314,6 +4411,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) SUBTEST(live_timeslice_preempt), SUBTEST(live_timeslice_rewind), SUBTEST(live_timeslice_queue), + SUBTEST(live_timeslice_nopreempt), SUBTEST(live_busywait_preempt), SUBTEST(live_preempt), SUBTEST(live_late_preempt), @@ -4932,9 +5030,7 @@ static int live_lrc_gpr(void *arg) return PTR_ERR(scratch); for_each_engine(engine, gt, id) { - unsigned long heartbeat; - - engine_heartbeat_disable(engine, &heartbeat); + engine_heartbeat_disable(engine); err = __live_lrc_gpr(engine, scratch, false); if (err) @@ -4945,7 +5041,7 @@ static int live_lrc_gpr(void *arg) goto err; err: - engine_heartbeat_enable(engine, heartbeat); + engine_heartbeat_enable(engine); if (igt_flush_test(gt->i915)) err = -EIO; if (err) @@ -5092,10 +5188,9 @@ static int live_lrc_timestamp(void *arg) */ for_each_engine(data.engine, gt, id) { - unsigned long heartbeat; int i, err = 0; - engine_heartbeat_disable(data.engine, &heartbeat); + engine_heartbeat_disable(data.engine); for (i = 0; i < ARRAY_SIZE(data.ce); i++) { struct intel_context *tmp; @@ -5128,7 +5223,7 @@ static int live_lrc_timestamp(void *arg) } err: - engine_heartbeat_enable(data.engine, heartbeat); + engine_heartbeat_enable(data.engine); for (i = 0; i < ARRAY_SIZE(data.ce); i++) { if (!data.ce[i]) break; diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c index 8831ffee2061..63f87d8608c3 100644 --- a/drivers/gpu/drm/i915/gt/selftest_mocs.c +++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c @@ -18,6 +18,20 @@ struct live_mocs { void *vaddr; }; +static struct intel_context *mocs_context_create(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return ce; + + /* We build large requests to read the registers from the ring */ + ce->ring = __intel_context_ring_size(SZ_16K); + + return ce; +} + static int request_add_sync(struct i915_request *rq, int err) { i915_request_get(rq); @@ -301,7 +315,7 @@ static int live_mocs_clean(void *arg) for_each_engine(engine, gt, id) { struct intel_context *ce; - ce = intel_context_create(engine); + ce = mocs_context_create(engine); if (IS_ERR(ce)) { err = PTR_ERR(ce); break; @@ -395,7 +409,7 @@ static int live_mocs_reset(void *arg) for_each_engine(engine, gt, id) { struct intel_context *ce; - ce = intel_context_create(engine); + ce = mocs_context_create(engine); if (IS_ERR(ce)) { err = PTR_ERR(ce); break; diff --git a/drivers/gpu/drm/i915/gt/selftest_ring.c b/drivers/gpu/drm/i915/gt/selftest_ring.c new file mode 100644 index 
000000000000..2a8c534dc125 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_ring.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2020 Intel Corporation + */ + +static struct intel_ring *mock_ring(unsigned long sz) +{ + struct intel_ring *ring; + + ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); + if (!ring) + return NULL; + + kref_init(&ring->ref); + ring->size = sz; + ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(sz); + ring->effective_size = sz; + ring->vaddr = (void *)(ring + 1); + atomic_set(&ring->pin_count, 1); + + intel_ring_update_space(ring); + + return ring; +} + +static void mock_ring_free(struct intel_ring *ring) +{ + kfree(ring); +} + +static int check_ring_direction(struct intel_ring *ring, + u32 next, u32 prev, + int expected) +{ + int result; + + result = intel_ring_direction(ring, next, prev); + if (result < 0) + result = -1; + else if (result > 0) + result = 1; + + if (result != expected) { + pr_err("intel_ring_direction(%u, %u):%d != %d\n", + next, prev, result, expected); + return -EINVAL; + } + + return 0; +} + +static int check_ring_step(struct intel_ring *ring, u32 x, u32 step) +{ + u32 prev = x, next = intel_ring_wrap(ring, x + step); + int err = 0; + + err |= check_ring_direction(ring, next, next, 0); + err |= check_ring_direction(ring, prev, prev, 0); + err |= check_ring_direction(ring, next, prev, 1); + err |= check_ring_direction(ring, prev, next, -1); + + return err; +} + +static int check_ring_offset(struct intel_ring *ring, u32 x, u32 step) +{ + int err = 0; + + err |= check_ring_step(ring, x, step); + err |= check_ring_step(ring, intel_ring_wrap(ring, x + 1), step); + err |= check_ring_step(ring, intel_ring_wrap(ring, x - 1), step); + + return err; +} + +static int igt_ring_direction(void *dummy) +{ + struct intel_ring *ring; + unsigned int half = 2048; + int step, err = 0; + + ring = mock_ring(2 * half); + if (!ring) + return -ENOMEM; + + GEM_BUG_ON(ring->size != 2 * half); + + /* Precision of wrap detection is limited to ring->size / 2 */ + for (step = 1; step < half; step <<= 1) { + err |= check_ring_offset(ring, 0, step); + err |= check_ring_offset(ring, half, step); + } + err |= check_ring_step(ring, 0, half - 64); + + /* And check unwrapped handling for good measure */ + err |= check_ring_offset(ring, 0, 2 * half + 64); + err |= check_ring_offset(ring, 3 * half, 1); + + mock_ring_free(ring); + return err; +} + +int intel_ring_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_ring_direction), + }; + + return i915_subtests(tests, NULL); +} diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 6275d69aa9cc..5049c3dd08a6 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -20,24 +20,20 @@ /* Try to isolate the impact of cstates from determing frequency response */ #define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */ -static unsigned long engine_heartbeat_disable(struct intel_engine_cs *engine) +static void engine_heartbeat_disable(struct intel_engine_cs *engine) { - unsigned long old; - - old = fetch_and_zero(&engine->props.heartbeat_interval_ms); + engine->props.heartbeat_interval_ms = 0; intel_engine_pm_get(engine); intel_engine_park_heartbeat(engine); - - return old; } -static void engine_heartbeat_enable(struct intel_engine_cs *engine, - unsigned long saved) +static void engine_heartbeat_enable(struct intel_engine_cs *engine) { intel_engine_pm_put(engine); - 
engine->props.heartbeat_interval_ms = saved; + engine->props.heartbeat_interval_ms = + engine->defaults.heartbeat_interval_ms; } static void dummy_rps_work(struct work_struct *wrk) @@ -246,7 +242,6 @@ int live_rps_clock_interval(void *arg) intel_gt_check_clock_frequency(gt); for_each_engine(engine, gt, id) { - unsigned long saved_heartbeat; struct i915_request *rq; u32 cycles; u64 dt; @@ -254,13 +249,13 @@ int live_rps_clock_interval(void *arg) if (!intel_engine_can_store_dword(engine)) continue; - saved_heartbeat = engine_heartbeat_disable(engine); + engine_heartbeat_disable(engine); rq = igt_spinner_create_request(&spin, engine->kernel_context, MI_NOOP); if (IS_ERR(rq)) { - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); err = PTR_ERR(rq); break; } @@ -271,7 +266,7 @@ int live_rps_clock_interval(void *arg) pr_err("%s: RPS spinner did not start\n", engine->name); igt_spinner_end(&spin); - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); intel_gt_set_wedged(engine->gt); err = -EIO; break; @@ -327,7 +322,7 @@ int live_rps_clock_interval(void *arg) intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); igt_spinner_end(&spin); - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); if (err == 0) { u64 time = intel_gt_pm_interval_to_ns(gt, cycles); @@ -405,7 +400,6 @@ int live_rps_control(void *arg) intel_gt_pm_get(gt); for_each_engine(engine, gt, id) { - unsigned long saved_heartbeat; struct i915_request *rq; ktime_t min_dt, max_dt; int f, limit; @@ -414,7 +408,7 @@ int live_rps_control(void *arg) if (!intel_engine_can_store_dword(engine)) continue; - saved_heartbeat = engine_heartbeat_disable(engine); + engine_heartbeat_disable(engine); rq = igt_spinner_create_request(&spin, engine->kernel_context, @@ -430,7 +424,7 @@ int live_rps_control(void *arg) pr_err("%s: RPS spinner did not start\n", engine->name); igt_spinner_end(&spin); - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); intel_gt_set_wedged(engine->gt); err = -EIO; break; @@ -440,7 +434,7 @@ int live_rps_control(void *arg) pr_err("%s: could not set minimum frequency [%x], only %x!\n", engine->name, rps->min_freq, read_cagf(rps)); igt_spinner_end(&spin); - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); show_pstate_limits(rps); err = -EINVAL; break; @@ -457,7 +451,7 @@ int live_rps_control(void *arg) pr_err("%s: could not restore minimum frequency [%x], only %x!\n", engine->name, rps->min_freq, read_cagf(rps)); igt_spinner_end(&spin); - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); show_pstate_limits(rps); err = -EINVAL; break; @@ -472,7 +466,7 @@ int live_rps_control(void *arg) min_dt = ktime_sub(ktime_get(), min_dt); igt_spinner_end(&spin); - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); pr_info("%s: range:[%x:%uMHz, %x:%uMHz] limit:[%x:%uMHz], %x:%x response %lluns:%lluns\n", engine->name, @@ -635,7 +629,6 @@ int live_rps_frequency_cs(void *arg) rps->work.func = dummy_rps_work; for_each_engine(engine, gt, id) { - unsigned long saved_heartbeat; struct i915_request *rq; struct i915_vma *vma; u32 *cancel, *cntr; @@ -644,14 +637,14 @@ int live_rps_frequency_cs(void *arg) int freq; } min, max; - saved_heartbeat = engine_heartbeat_disable(engine); + engine_heartbeat_disable(engine); vma = create_spin_counter(engine, engine->kernel_context->vm, false, &cancel, &cntr); if (IS_ERR(vma)) { 
err = PTR_ERR(vma); - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); break; } @@ -732,7 +725,7 @@ int live_rps_frequency_cs(void *arg) i915_vma_unpin(vma); i915_vma_put(vma); - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); if (igt_flush_test(gt->i915)) err = -EIO; if (err) @@ -778,7 +771,6 @@ int live_rps_frequency_srm(void *arg) rps->work.func = dummy_rps_work; for_each_engine(engine, gt, id) { - unsigned long saved_heartbeat; struct i915_request *rq; struct i915_vma *vma; u32 *cancel, *cntr; @@ -787,14 +779,14 @@ int live_rps_frequency_srm(void *arg) int freq; } min, max; - saved_heartbeat = engine_heartbeat_disable(engine); + engine_heartbeat_disable(engine); vma = create_spin_counter(engine, engine->kernel_context->vm, true, &cancel, &cntr); if (IS_ERR(vma)) { err = PTR_ERR(vma); - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); break; } @@ -874,7 +866,7 @@ int live_rps_frequency_srm(void *arg) i915_vma_unpin(vma); i915_vma_put(vma); - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); if (igt_flush_test(gt->i915)) err = -EIO; if (err) @@ -1066,16 +1058,14 @@ int live_rps_interrupt(void *arg) for_each_engine(engine, gt, id) { /* Keep the engine busy with a spinner; expect an UP! */ if (pm_events & GEN6_PM_RP_UP_THRESHOLD) { - unsigned long saved_heartbeat; - intel_gt_pm_wait_for_idle(engine->gt); GEM_BUG_ON(intel_rps_is_active(rps)); - saved_heartbeat = engine_heartbeat_disable(engine); + engine_heartbeat_disable(engine); err = __rps_up_interrupt(rps, engine, &spin); - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); if (err) goto out; @@ -1084,15 +1074,13 @@ int live_rps_interrupt(void *arg) /* Keep the engine awake but idle and check for DOWN */ if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) { - unsigned long saved_heartbeat; - - saved_heartbeat = engine_heartbeat_disable(engine); + engine_heartbeat_disable(engine); intel_rc6_disable(>->rc6); err = __rps_down_interrupt(rps, engine); intel_rc6_enable(>->rc6); - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); if (err) goto out; } @@ -1168,7 +1156,6 @@ int live_rps_power(void *arg) rps->work.func = dummy_rps_work; for_each_engine(engine, gt, id) { - unsigned long saved_heartbeat; struct i915_request *rq; struct { u64 power; @@ -1178,13 +1165,13 @@ int live_rps_power(void *arg) if (!intel_engine_can_store_dword(engine)) continue; - saved_heartbeat = engine_heartbeat_disable(engine); + engine_heartbeat_disable(engine); rq = igt_spinner_create_request(&spin, engine->kernel_context, MI_NOOP); if (IS_ERR(rq)) { - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); err = PTR_ERR(rq); break; } @@ -1195,7 +1182,7 @@ int live_rps_power(void *arg) pr_err("%s: RPS spinner did not start\n", engine->name); igt_spinner_end(&spin); - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); intel_gt_set_wedged(engine->gt); err = -EIO; break; @@ -1208,7 +1195,7 @@ int live_rps_power(void *arg) min.power = measure_power_at(rps, &min.freq); igt_spinner_end(&spin); - engine_heartbeat_enable(engine, saved_heartbeat); + engine_heartbeat_enable(engine); pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n", engine->name, diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index c2578a0f2f14..ef1c35073dc0 100644 --- 
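The engine_heartbeat_disable()/enable() pairs converted above, and the identical pair in selftest_timeline.c that follows, no longer pass a saved interval through the caller; enable() restores engine->defaults.heartbeat_interval_ms instead. A minimal sketch of the new shape, with invented structure names and an arbitrary interval, assuming the defaults block holds a pristine copy of the tunable:

#include <stdio.h>

/* Invented types; only the restore-from-defaults idea is taken from the patch. */
struct engine {
        struct { unsigned long heartbeat_interval_ms; } props, defaults;
};

static void heartbeat_disable(struct engine *e)
{
        e->props.heartbeat_interval_ms = 0;     /* park the heartbeat */
}

static void heartbeat_enable(struct engine *e)
{
        /* restore from the pristine copy rather than a caller-saved value */
        e->props.heartbeat_interval_ms = e->defaults.heartbeat_interval_ms;
}

int main(void)
{
        struct engine e = { .props = { 1000 }, .defaults = { 1000 } };  /* arbitrary interval */

        heartbeat_disable(&e);
        printf("disabled: %lu\n", e.props.heartbeat_interval_ms);       /* 0 */
        heartbeat_enable(&e);
        printf("restored: %lu\n", e.props.heartbeat_interval_ms);       /* 1000 */
        return 0;
}

Dropping the saved value is also what removes the kmalloc_array() of per-sibling heartbeats in reset_virtual_engine() above, since there is nothing left for the callers to carry.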
a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -751,22 +751,20 @@ static int live_hwsp_wrap(void *arg) return err; } -static void engine_heartbeat_disable(struct intel_engine_cs *engine, - unsigned long *saved) +static void engine_heartbeat_disable(struct intel_engine_cs *engine) { - *saved = engine->props.heartbeat_interval_ms; engine->props.heartbeat_interval_ms = 0; intel_engine_pm_get(engine); intel_engine_park_heartbeat(engine); } -static void engine_heartbeat_enable(struct intel_engine_cs *engine, - unsigned long saved) +static void engine_heartbeat_enable(struct intel_engine_cs *engine) { intel_engine_pm_put(engine); - engine->props.heartbeat_interval_ms = saved; + engine->props.heartbeat_interval_ms = + engine->defaults.heartbeat_interval_ms; } static int live_hwsp_rollover_kernel(void *arg) @@ -785,10 +783,9 @@ static int live_hwsp_rollover_kernel(void *arg) struct intel_context *ce = engine->kernel_context; struct intel_timeline *tl = ce->timeline; struct i915_request *rq[3] = {}; - unsigned long heartbeat; int i; - engine_heartbeat_disable(engine, &heartbeat); + engine_heartbeat_disable(engine); if (intel_gt_wait_for_idle(gt, HZ / 2)) { err = -EIO; goto out; @@ -839,7 +836,7 @@ static int live_hwsp_rollover_kernel(void *arg) out: for (i = 0; i < ARRAY_SIZE(rq); i++) i915_request_put(rq[i]); - engine_heartbeat_enable(engine, heartbeat); + engine_heartbeat_enable(engine); if (err) break; } diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 5ed323254ee1..32785463ec9e 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -623,6 +623,8 @@ static int check_dirty_whitelist(struct intel_context *ce) err = -EINVAL; goto out_unpin; } + } else { + rsvd = 0; } expect = results[0]; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4dc601dffc08..284cf078135a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3125,6 +3125,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) val = I915_READ(GEN11_DE_HPD_IMR); val &= ~hotplug_irqs; + val |= ~enabled_irqs & hotplug_irqs; I915_WRITE(GEN11_DE_HPD_IMR, val); POSTING_READ(GEN11_DE_HPD_IMR); diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index e991a707bdb7..962ded9ce73f 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -269,12 +269,48 @@ static bool exclusive_mmio_access(const struct drm_i915_private *i915) return IS_GEN(i915, 7); } +static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) +{ + struct intel_engine_pmu *pmu = &engine->pmu; + bool busy; + u32 val; + + val = ENGINE_READ_FW(engine, RING_CTL); + if (val == 0) /* powerwell off => engine idle */ + return; + + if (val & RING_WAIT) + add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); + if (val & RING_WAIT_SEMAPHORE) + add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns); + + /* No need to sample when busy stats are supported. */ + if (intel_engine_supports_stats(engine)) + return; + + /* + * While waiting on a semaphore or event, MI_MODE reports the + * ring as idle. However, previously using the seqno, and with + * execlists sampling, we account for the ring waiting as the + * engine being busy. Therefore, we record the sample as being + * busy if either waiting or !idle. 
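The busy rule spelled out in this comment is simple enough to restate outside the driver: an engine sample counts as busy if the ring control register reports a wait, or failing that if MI_MODE does not report idle. The sketch below uses placeholder bit positions, not the real register layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions, for illustration only. */
#define RING_WAIT            (1u << 11)
#define RING_WAIT_SEMAPHORE  (1u << 10)
#define MODE_IDLE            (1u << 9)

static bool engine_is_busy(uint32_t ring_ctl, uint32_t mi_mode)
{
        if (ring_ctl & (RING_WAIT | RING_WAIT_SEMAPHORE))
                return true;            /* waiting still counts as busy */

        return !(mi_mode & MODE_IDLE);  /* otherwise trust the idle bit */
}

int main(void)
{
        printf("waiting: %d\n", engine_is_busy(RING_WAIT, MODE_IDLE)); /* 1 */
        printf("running: %d\n", engine_is_busy(0, 0));                 /* 1 */
        printf("idle:    %d\n", engine_is_busy(0, MODE_IDLE));         /* 0 */
        return 0;
}

Pulling this decision into an engine_sample() helper is what lets engines_sample() below take the uncore lock around a single call when exclusive_mmio_access() demands it, instead of the lock/goto-skip/unlock dance that is being deleted.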
+ */ + busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT); + if (!busy) { + val = ENGINE_READ_FW(engine, RING_MI_MODE); + busy = !(val & MODE_IDLE); + } + if (busy) + add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); +} + static void engines_sample(struct intel_gt *gt, unsigned int period_ns) { struct drm_i915_private *i915 = gt->i915; struct intel_engine_cs *engine; enum intel_engine_id id; + unsigned long flags; if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0) return; @@ -283,53 +319,17 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns) return; for_each_engine(engine, gt, id) { - struct intel_engine_pmu *pmu = &engine->pmu; - spinlock_t *mmio_lock; - unsigned long flags; - bool busy; - u32 val; - if (!intel_engine_pm_get_if_awake(engine)) continue; - mmio_lock = NULL; - if (exclusive_mmio_access(i915)) - mmio_lock = &engine->uncore->lock; - - if (unlikely(mmio_lock)) - spin_lock_irqsave(mmio_lock, flags); - - val = ENGINE_READ_FW(engine, RING_CTL); - if (val == 0) /* powerwell off => engine idle */ - goto skip; - - if (val & RING_WAIT) - add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); - if (val & RING_WAIT_SEMAPHORE) - add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns); - - /* No need to sample when busy stats are supported. */ - if (intel_engine_supports_stats(engine)) - goto skip; - - /* - * While waiting on a semaphore or event, MI_MODE reports the - * ring as idle. However, previously using the seqno, and with - * execlists sampling, we account for the ring waiting as the - * engine being busy. Therefore, we record the sample as being - * busy if either waiting or !idle. - */ - busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT); - if (!busy) { - val = ENGINE_READ_FW(engine, RING_MI_MODE); - busy = !(val & MODE_IDLE); + if (exclusive_mmio_access(i915)) { + spin_lock_irqsave(&engine->uncore->lock, flags); + engine_sample(engine, period_ns); + spin_unlock_irqrestore(&engine->uncore->lock, flags); + } else { + engine_sample(engine, period_ns); } - if (busy) - add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); -skip: - if (unlikely(mmio_lock)) - spin_unlock_irqrestore(mmio_lock, flags); intel_engine_pm_put_async(engine); } } diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h index 5003a71113cb..8aa7866ec6b6 100644 --- a/drivers/gpu/drm/i915/i915_priolist_types.h +++ b/drivers/gpu/drm/i915/i915_priolist_types.h @@ -42,7 +42,7 @@ enum { * active request. 
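Further up, gen11_hpd_irq_setup() in i915_irq.c gains the line "val |= ~enabled_irqs & hotplug_irqs;" so that hotplug lines which were not requested stay masked rather than being unmasked wholesale. A small worked example of that mask arithmetic, assuming the usual convention that a set bit in an interrupt mask register means the interrupt is masked; the bit values are made up:

#include <stdint.h>
#include <stdio.h>

/* Toy IMR update mirroring the added line; all bit values are invented. */
static uint32_t update_hpd_mask(uint32_t imr, uint32_t hotplug_irqs, uint32_t enabled_irqs)
{
        imr &= ~hotplug_irqs;                 /* unmask every hotplug line... */
        imr |= ~enabled_irqs & hotplug_irqs;  /* ...then re-mask the ones not enabled */
        return imr;
}

int main(void)
{
        uint32_t imr = 0xf0;      /* unrelated bits, must stay untouched */
        uint32_t hotplug = 0x0f;  /* four hotplug lines */
        uint32_t enabled = 0x05;  /* only two of them requested */

        /* bits 0 and 2 end up unmasked, bits 1 and 3 stay masked: 0xfa */
        printf("imr = 0x%02x\n", update_hpd_mask(imr, hotplug, enabled));
        return 0;
}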
*/ #define I915_PRIORITY_UNPREEMPTABLE INT_MAX -#define I915_PRIORITY_BARRIER INT_MAX +#define I915_PRIORITY_BARRIER (I915_PRIORITY_UNPREEMPTABLE - 1) struct i915_priolist { struct list_head requests[I915_PRIORITY_COUNT]; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7717581350bd..06cd1d28a176 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7896,7 +7896,7 @@ enum { /* GEN7 chicken */ #define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010) - #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1 << 10) | (1 << 26)) + #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC (1 << 10) #define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14) #define COMMON_SLICE_CHICKEN2 _MMIO(0x7014) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 696491d71a1d..07f663cd2d1c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6830,16 +6830,6 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(ILK_DISPLAY_CHICKEN2, I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_ELPIN_409_SELECT); - I915_WRITE(_3D_CHICKEN2, - _3D_CHICKEN2_WM_READ_PIPELINED << 16 | - _3D_CHICKEN2_WM_READ_PIPELINED); - - /* WaDisableRenderCachePipelinedFlush:ilk */ - I915_WRITE(CACHE_MODE_0, - _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); - - /* WaDisable_RenderCache_OperationalFlush:ilk */ - I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); g4x_disable_trickle_feed(dev_priv); @@ -6902,27 +6892,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_ELPIN_409_SELECT); - /* WaDisableHiZPlanesWhenMSAAEnabled:snb */ - I915_WRITE(_3D_CHICKEN, - _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); - - /* WaDisable_RenderCache_OperationalFlush:snb */ - I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); - - /* - * BSpec recoomends 8x4 when MSAA is used, - * however in practice 16x4 seems fastest. - * - * Note that PS/WM thread counts depend on the WIZ hashing - * disable bit, which we don't touch here, but it's good - * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). - */ - I915_WRITE(GEN6_GT_MODE, - _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); - - I915_WRITE(CACHE_MODE_0, - _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); - I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | GEN6_BLBUNIT_CLOCK_GATE_DISABLE | @@ -6945,18 +6914,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | GEN6_RCCUNIT_CLOCK_GATE_DISABLE); - /* WaStripsFansDisableFastClipPerformanceFix:snb */ - I915_WRITE(_3D_CHICKEN3, - _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL)); - - /* - * Bspec says: - * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and - * 3DSTATE_SF number of SF output attributes is more than 16." - */ - I915_WRITE(_3D_CHICKEN3, - _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH)); - /* * According to the spec the following bits should be * set in order to enable memory self-refresh and fbc: @@ -6986,24 +6943,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) gen6_check_mch_setup(dev_priv); } -static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) -{ - u32 reg = I915_READ(GEN7_FF_THREAD_MODE); - - /* - * WaVSThreadDispatchOverride:ivb,vlv - * - * This actually overrides the dispatch - * mode for all thread types. 
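Lowering I915_PRIORITY_BARRIER one step below I915_PRIORITY_UNPREEMPTABLE is what makes the earlier changes hold together: need_timeslice() now asserts that its hint never reaches the unpreemptable sentinel, and live_timeslice_nopreempt() checks that even a barrier cannot slice into a request flagged no-preempt. The toy comparison below assumes, as that subtest relies on, that a no-preempt request reports the unpreemptable sentinel as its effective priority; the helper is invented.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Values mirror the new defines; everything else is an invented stand-in. */
#define PRIORITY_UNPREEMPTABLE  INT_MAX
#define PRIORITY_BARRIER        (PRIORITY_UNPREEMPTABLE - 1)

static bool need_timeslice(int queue_hint, int running_effective_prio)
{
        return queue_hint >= running_effective_prio;
}

int main(void)
{
        /* A barrier can still slice into an ordinary request... */
        printf("%d\n", need_timeslice(PRIORITY_BARRIER, 1000));                   /* 1 */
        /* ...but never into one running at the unpreemptable sentinel. */
        printf("%d\n", need_timeslice(PRIORITY_BARRIER, PRIORITY_UNPREEMPTABLE)); /* 0 */
        return 0;
}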
- */ - reg &= ~GEN7_FF_SCHED_MASK; - reg |= GEN7_FF_TS_SCHED_HW; - reg |= GEN7_FF_VS_SCHED_HW; - reg |= GEN7_FF_DS_SCHED_HW; - - I915_WRITE(GEN7_FF_THREAD_MODE, reg); -} - static void lpt_init_clock_gating(struct drm_i915_private *dev_priv) { /* @@ -7230,45 +7169,10 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) { - /* L3 caching of data atomics doesn't work -- disable it. */ - I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); - I915_WRITE(HSW_ROW_CHICKEN3, - _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE)); - /* This is required by WaCatErrorRejectionIssue:hsw */ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, - I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | - GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); - - /* WaVSRefCountFullforceMissDisable:hsw */ - I915_WRITE(GEN7_FF_THREAD_MODE, - I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME); - - /* WaDisable_RenderCache_OperationalFlush:hsw */ - I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); - - /* enable HiZ Raw Stall Optimization */ - I915_WRITE(CACHE_MODE_0_GEN7, - _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); - - /* WaDisable4x2SubspanOptimization:hsw */ - I915_WRITE(CACHE_MODE_1, - _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); - - /* - * BSpec recommends 8x4 when MSAA is used, - * however in practice 16x4 seems fastest. - * - * Note that PS/WM thread counts depend on the WIZ hashing - * disable bit, which we don't touch here, but it's good - * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). - */ - I915_WRITE(GEN7_GT_MODE, - _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); - - /* WaSampleCChickenBitEnable:hsw */ - I915_WRITE(HALF_SLICE_CHICKEN3, - _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE)); + I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | + GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); /* WaSwitchSolVfFArbitrationPriority:hsw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); @@ -7282,32 +7186,11 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); - /* WaDisableEarlyCull:ivb */ - I915_WRITE(_3D_CHICKEN3, - _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); - /* WaDisableBackToBackFlipFix:ivb */ I915_WRITE(IVB_CHICKEN3, CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | CHICKEN3_DGMG_DONE_FIX_DISABLE); - /* WaDisablePSDDualDispatchEnable:ivb */ - if (IS_IVB_GT1(dev_priv)) - I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, - _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); - - /* WaDisable_RenderCache_OperationalFlush:ivb */ - I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); - - /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */ - I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, - GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); - - /* WaApplyL3ControlAndL3ChickenMode:ivb */ - I915_WRITE(GEN7_L3CNTLREG1, - GEN7_WA_FOR_GEN7_L3_CONTROL); - I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, - GEN7_WA_L3_CHICKEN_MODE); if (IS_IVB_GT1(dev_priv)) I915_WRITE(GEN7_ROW_CHICKEN2, _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); @@ -7319,10 +7202,6 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); } - /* WaForceL3Serialization:ivb */ - I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & - ~L3SQ_URB_READ_CAM_MATCH_DISABLE); - /* * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 
* This implements the WaDisableRCZUnitClockGating:ivb workaround. @@ -7337,29 +7216,6 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) g4x_disable_trickle_feed(dev_priv); - gen7_setup_fixed_func_scheduler(dev_priv); - - if (0) { /* causes HiZ corruption on ivb:gt1 */ - /* enable HiZ Raw Stall Optimization */ - I915_WRITE(CACHE_MODE_0_GEN7, - _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); - } - - /* WaDisable4x2SubspanOptimization:ivb */ - I915_WRITE(CACHE_MODE_1, - _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); - - /* - * BSpec recommends 8x4 when MSAA is used, - * however in practice 16x4 seems fastest. - * - * Note that PS/WM thread counts depend on the WIZ hashing - * disable bit, which we don't touch here, but it's good - * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). - */ - I915_WRITE(GEN7_GT_MODE, - _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); - snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); snpcr &= ~GEN6_MBC_SNPCR_MASK; snpcr |= GEN6_MBC_SNPCR_MED; @@ -7373,28 +7229,11 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) static void vlv_init_clock_gating(struct drm_i915_private *dev_priv) { - /* WaDisableEarlyCull:vlv */ - I915_WRITE(_3D_CHICKEN3, - _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); - /* WaDisableBackToBackFlipFix:vlv */ I915_WRITE(IVB_CHICKEN3, CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | CHICKEN3_DGMG_DONE_FIX_DISABLE); - /* WaPsdDispatchEnable:vlv */ - /* WaDisablePSDDualDispatchEnable:vlv */ - I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, - _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP | - GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); - - /* WaDisable_RenderCache_OperationalFlush:vlv */ - I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); - - /* WaForceL3Serialization:vlv */ - I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & - ~L3SQ_URB_READ_CAM_MATCH_DISABLE); - /* WaDisableDopClockGating:vlv */ I915_WRITE(GEN7_ROW_CHICKEN2, _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); @@ -7404,8 +7243,6 @@ static void vlv_init_clock_gating(struct drm_i915_private *dev_priv) I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); - gen7_setup_fixed_func_scheduler(dev_priv); - /* * According to the spec, bit 13 (RCZUNIT) must be set on IVB. * This implements the WaDisableRCZUnitClockGating:vlv workaround. @@ -7419,30 +7256,6 @@ static void vlv_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(GEN7_UCGCTL4, I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); - /* - * BSpec says this must be set, even though - * WaDisable4x2SubspanOptimization isn't listed for VLV. - */ - I915_WRITE(CACHE_MODE_1, - _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); - - /* - * BSpec recommends 8x4 when MSAA is used, - * however in practice 16x4 seems fastest. - * - * Note that PS/WM thread counts depend on the WIZ hashing - * disable bit, which we don't touch here, but it's good - * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). - */ - I915_WRITE(GEN7_GT_MODE, - _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); - - /* - * WaIncreaseL3CreditsForVLVB0:vlv - * This is the hardware default actually. 
- */ - I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE); - /* * WaDisableVLVClockGating_VBIIssue:vlv * Disable clock gating on th GCFG unit to prevent a delay @@ -7495,13 +7308,6 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv) dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; I915_WRITE(DSPCLK_GATE_D, dspclk_gate); - /* WaDisableRenderCachePipelinedFlush */ - I915_WRITE(CACHE_MODE_0, - _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); - - /* WaDisable_RenderCache_OperationalFlush:g4x */ - I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); - g4x_disable_trickle_feed(dev_priv); } @@ -7517,11 +7323,6 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv) intel_uncore_write(uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); - - /* WaDisable_RenderCache_OperationalFlush:gen4 */ - intel_uncore_write(uncore, - CACHE_MODE_0, - _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); } static void i965g_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7534,9 +7335,6 @@ static void i965g_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(RENCLK_GATE_D2, 0); I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); - - /* WaDisable_RenderCache_OperationalFlush:gen4 */ - I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); } static void gen3_init_clock_gating(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index 6a2be7d0dd95..6090ce35226b 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -21,6 +21,7 @@ selftest(fence, i915_sw_fence_mock_selftests) selftest(scatterlist, scatterlist_mock_selftests) selftest(syncmap, i915_syncmap_mock_selftests) selftest(uncore, intel_uncore_mock_selftests) +selftest(ring, intel_ring_mock_selftests) selftest(engine, intel_engine_cs_mock_selftests) selftest(timelines, intel_timeline_mock_selftests) selftest(requests, i915_request_mock_selftests) diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c index 04e1d38d41f7..08802e5177f6 100644 --- a/drivers/gpu/drm/mcde/mcde_display.c +++ b/drivers/gpu/drm/mcde/mcde_display.c @@ -812,7 +812,7 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc *crtc = &pipe->crtc; struct drm_plane *plane = &pipe->plane; struct drm_device *drm = crtc->dev; - struct mcde *mcde = drm->dev_private; + struct mcde *mcde = to_mcde(drm); const struct drm_display_mode *mode = &cstate->mode; struct drm_framebuffer *fb = plane->state->fb; u32 format = fb->format->format; diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index 84f3e2dbd77b..80082d6dce3a 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -209,7 +209,6 @@ static int mcde_modeset_init(struct drm_device *drm) drm_mode_config_reset(drm); drm_kms_helper_poll_init(drm); - drm_fbdev_generic_setup(drm, 32); return 0; } @@ -264,6 +263,8 @@ static int mcde_drm_bind(struct device *dev) if (ret < 0) goto unbind; + drm_fbdev_generic_setup(drm, 32); + return 0; unbind: diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index b6ecd1552132..5178f87d6574 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2495,6 +2495,7 @@ static const struct panel_desc logicpd_type_28 = { 
.bus_format = MEDIA_BUS_FMT_RGB888_1X24, .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, }; static const struct panel_desc mitsubishi_aa070mc01 = { @@ -2663,6 +2664,7 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = { .bus_format = MEDIA_BUS_FMT_RGB888_1X24, .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, }; static const struct display_timing nlt_nl192108ac18_02d_timing = { diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index b57c37ddd164..c7fbb7932f37 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -2127,7 +2127,7 @@ static int ni_init_smc_spll_table(struct radeon_device *rdev) if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) ret = -EINVAL; - if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) + if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT)) ret = -EINVAL; if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT)) diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index 0919f1f159a4..f65d1489dc50 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -31,6 +31,7 @@ config DRM_RCAR_DW_HDMI config DRM_RCAR_LVDS tristate "R-Car DU LVDS Encoder Support" depends on DRM && DRM_BRIDGE && OF + select DRM_KMS_HELPER select DRM_PANEL select OF_FLATTREE select OF_OVERLAY diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index 56cc037fd312..cc4fb916318f 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -363,6 +363,19 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master, mixer->engine.ops = &sun8i_engine_ops; mixer->engine.node = dev->of_node; + if (of_find_property(dev->of_node, "iommus", NULL)) { + /* + * This assume we have the same DMA constraints for + * all our the mixers in our pipeline. This sounds + * bad, but it has always been the case for us, and + * DRM doesn't do per-device allocation either, so we + * would need to fix DRM first... + */ + ret = of_dma_configure(drm->dev, dev->of_node, true); + if (ret) + return ret; + } + /* * While this function can fail, we shouldn't do anything * if this happens. 
Some early DE2 DT entries don't provide diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 83f31c6e891c..04d6848d19fc 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -957,6 +957,7 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm, } drm_plane_helper_add(&plane->base, &tegra_cursor_plane_helper_funcs); + drm_plane_create_zpos_immutable_property(&plane->base, 255); return &plane->base; } diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index 8183e617bf6b..22a03f7ffdc1 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -149,7 +149,9 @@ int tegra_display_hub_prepare(struct tegra_display_hub *hub) for (i = 0; i < hub->soc->num_wgrps; i++) { struct tegra_windowgroup *wgrp = &hub->wgrps[i]; - tegra_windowgroup_enable(wgrp); + /* Skip orphaned window group whose parent DC is disabled */ + if (wgrp->parent) + tegra_windowgroup_enable(wgrp); } return 0; @@ -166,7 +168,9 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub) for (i = 0; i < hub->soc->num_wgrps; i++) { struct tegra_windowgroup *wgrp = &hub->wgrps[i]; - tegra_windowgroup_disable(wgrp); + /* Skip orphaned window group whose parent DC is disabled */ + if (wgrp->parent) + tegra_windowgroup_disable(wgrp); } } @@ -944,6 +948,15 @@ static int tegra_display_hub_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to register host1x client: %d\n", err); + err = devm_of_platform_populate(&pdev->dev); + if (err < 0) + goto unregister; + + return err; + +unregister: + host1x_client_unregister(&hub->client); + pm_runtime_disable(&pdev->dev); return err; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index f73b81c2576e..0f20e14a4cfd 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -883,8 +883,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, if (!fence) return 0; - if (no_wait_gpu) + if (no_wait_gpu) { + dma_fence_put(fence); return -EBUSY; + } dma_resv_add_shared_fence(bo->base.resv, fence); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index a43aa7275f12..fa03fab02076 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -300,8 +300,10 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, break; case -EBUSY: case -ERESTARTSYS: + dma_fence_put(moving); return VM_FAULT_NOPAGE; default: + dma_fence_put(moving); return VM_FAULT_SIGBUS; } diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 6a995db51d6d..e201f62d62c0 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -686,8 +686,17 @@ EXPORT_SYMBOL(host1x_driver_register_full); */ void host1x_driver_unregister(struct host1x_driver *driver) { + struct host1x *host1x; + driver_unregister(&driver->driver); + mutex_lock(&devices_lock); + + list_for_each_entry(host1x, &devices, list) + host1x_detach_driver(host1x, driver); + + mutex_unlock(&devices_lock); + mutex_lock(&drivers_lock); list_del_init(&driver->list); mutex_unlock(&drivers_lock); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index d24344e91922..d0ebb70e2fdd 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -468,11 +468,18 @@ static int host1x_probe(struct platform_device *pdev) err = host1x_register(host); if (err < 0) - goto deinit_intr; + goto deinit_debugfs; + + err = devm_of_platform_populate(&pdev->dev); + if (err < 0) + goto unregister; return 0; -deinit_intr: 
+unregister: + host1x_unregister(host); +deinit_debugfs: + host1x_debug_deinit(host); host1x_intr_deinit(host); deinit_syncpt: host1x_syncpt_deinit(host); diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c index 4f932a419752..603b4a9969d3 100644 --- a/drivers/hwtracing/stm/policy.c +++ b/drivers/hwtracing/stm/policy.c @@ -34,7 +34,7 @@ struct stp_policy_node { unsigned int first_channel; unsigned int last_channel; /* this is the one that's exposed to the attributes */ - unsigned char priv[0]; + unsigned char priv[]; }; void *stp_policy_node_priv(struct stp_policy_node *pn) diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h index 3569439d53bb..a9be49fc7a6b 100644 --- a/drivers/hwtracing/stm/stm.h +++ b/drivers/hwtracing/stm/stm.h @@ -23,7 +23,7 @@ void *stp_policy_node_priv(struct stp_policy_node *pn); struct stp_master { unsigned int nr_free; - unsigned long chan_map[0]; + unsigned long chan_map[]; }; struct stm_device { @@ -42,7 +42,7 @@ struct stm_device { const struct config_item_type *pdrv_node_type; /* master allocation */ spinlock_t mc_lock; - struct stp_master *masters[0]; + struct stp_master *masters[]; }; #define to_stm_device(_d) \ diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index e3a8640db7da..3c19aada4b30 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -286,10 +286,8 @@ int i2c_dw_acpi_configure(struct device *device) } EXPORT_SYMBOL_GPL(i2c_dw_acpi_configure); -void i2c_dw_acpi_adjust_bus_speed(struct device *device) +static u32 i2c_dw_acpi_round_bus_speed(struct device *device) { - struct dw_i2c_dev *dev = dev_get_drvdata(device); - struct i2c_timings *t = &dev->timings; u32 acpi_speed; int i; @@ -300,9 +298,22 @@ void i2c_dw_acpi_adjust_bus_speed(struct device *device) */ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) { if (acpi_speed >= supported_speeds[i]) - break; + return supported_speeds[i]; } - acpi_speed = i < ARRAY_SIZE(supported_speeds) ? 
supported_speeds[i] : 0; + + return 0; +} + +#else /* CONFIG_ACPI */ + +static inline u32 i2c_dw_acpi_round_bus_speed(struct device *device) { return 0; } + +#endif /* CONFIG_ACPI */ + +void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev) +{ + u32 acpi_speed = i2c_dw_acpi_round_bus_speed(dev->dev); + struct i2c_timings *t = &dev->timings; /* * Find bus speed from the "clock-frequency" device property, ACPI @@ -315,9 +326,7 @@ void i2c_dw_acpi_adjust_bus_speed(struct device *device) else t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; } -EXPORT_SYMBOL_GPL(i2c_dw_acpi_adjust_bus_speed); - -#endif /* CONFIG_ACPI */ +EXPORT_SYMBOL_GPL(i2c_dw_adjust_bus_speed); u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) { diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 556673a1f61b..eb5ef4d0f463 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -361,11 +361,10 @@ static inline int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) { return 0; #endif int i2c_dw_validate_speed(struct dw_i2c_dev *dev); +void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev); #if IS_ENABLED(CONFIG_ACPI) int i2c_dw_acpi_configure(struct device *device); -void i2c_dw_acpi_adjust_bus_speed(struct device *device); #else static inline int i2c_dw_acpi_configure(struct device *device) { return -ENODEV; } -static inline void i2c_dw_acpi_adjust_bus_speed(struct device *device) {} #endif diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 947c096f86e3..8522134f9ea9 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -240,7 +240,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, } } - i2c_dw_acpi_adjust_bus_speed(&pdev->dev); + i2c_dw_adjust_bus_speed(dev); if (has_acpi_companion(&pdev->dev)) i2c_dw_acpi_configure(&pdev->dev); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 0de4e302fc6a..c2efaaaac252 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -228,7 +228,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) else i2c_parse_fw_timings(&pdev->dev, t, false); - i2c_dw_acpi_adjust_bus_speed(&pdev->dev); + i2c_dw_adjust_bus_speed(dev); if (pdev->dev.of_node) dw_i2c_of_configure(pdev); diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c index e0c256922d4f..977d6f524649 100644 --- a/drivers/i2c/busses/i2c-fsi.c +++ b/drivers/i2c/busses/i2c-fsi.c @@ -98,7 +98,7 @@ #define I2C_STAT_DAT_REQ BIT(25) #define I2C_STAT_CMD_COMP BIT(24) #define I2C_STAT_STOP_ERR BIT(23) -#define I2C_STAT_MAX_PORT GENMASK(19, 16) +#define I2C_STAT_MAX_PORT GENMASK(22, 16) #define I2C_STAT_ANY_INT BIT(15) #define I2C_STAT_SCL_IN BIT(11) #define I2C_STAT_SDA_IN BIT(10) diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index d1f278f73011..26f03a14a478 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -815,31 +815,6 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf } EXPORT_SYMBOL_GPL(i2c_new_client_device); -/** - * i2c_new_device - instantiate an i2c device - * @adap: the adapter managing the device - * @info: describes one I2C device; bus_num is ignored - * Context: can sleep - * - * This deprecated function has the same functionality as - * @i2c_new_client_device, it just returns NULL instead of 
an ERR_PTR in case of - * an error for compatibility with current I2C API. It will be removed once all - * users are converted. - * - * This returns the new i2c client, which may be saved for later use with - * i2c_unregister_device(); or NULL to indicate an error. - */ -struct i2c_client * -i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info) -{ - struct i2c_client *ret; - - ret = i2c_new_client_device(adap, info); - return IS_ERR(ret) ? NULL : ret; -} -EXPORT_SYMBOL_GPL(i2c_new_device); - - /** * i2c_unregister_device - reverse effect of i2c_new_*_device() * @client: value returned from i2c_new_*_device() diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index b34d2ff06931..f5c9787992e9 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -4,7 +4,7 @@ * * This file contains the SMBus functions which are always included in the I2C * core because they can be emulated via I2C. SMBus specific extensions - * (e.g. smbalert) are handled in a seperate i2c-smbus module. + * (e.g. smbalert) are handled in a separate i2c-smbus module. * * All SMBus-related things are written by Frodo Looijaard * SMBus 2.0 support by Mark Studebaker and @@ -495,6 +495,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, break; case I2C_SMBUS_BLOCK_DATA: case I2C_SMBUS_BLOCK_PROC_CALL: + if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) { + dev_err(&adapter->dev, + "Invalid block size returned: %d\n", + msg[1].buf[0]); + status = -EPROTO; + goto cleanup; + } for (i = 0; i < msg[1].buf[0] + 1; i++) data->block[i] = msg[1].buf[i]; break; diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 9ce787e37e22..0d1377232933 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -918,6 +918,7 @@ static void cm_free_work(struct cm_work *work) static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv, struct cm_work *work) + __releases(&cm_id_priv->lock) { bool immediate; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 3d7cc9f0f3d4..c30cf5307ce3 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1624,6 +1624,8 @@ static struct rdma_id_private *cma_find_listener( { struct rdma_id_private *id_priv, *id_priv_dev; + lockdep_assert_held(&lock); + if (!bind_list) return ERR_PTR(-EINVAL); @@ -1670,6 +1672,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id, } } + mutex_lock(&lock); /* * Net namespace might be getting deleted while route lookup, * cm_id lookup is in progress. 
Therefore, perform netdevice @@ -1711,6 +1714,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id, id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev); err: rcu_read_unlock(); + mutex_unlock(&lock); if (IS_ERR(id_priv) && *net_dev) { dev_put(*net_dev); *net_dev = NULL; @@ -2492,6 +2496,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, struct net *net = id_priv->id.route.addr.dev_addr.net; int ret; + lockdep_assert_held(&lock); + if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) return; @@ -3342,6 +3348,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list, u64 sid, mask; __be16 port; + lockdep_assert_held(&lock); + addr = cma_src_addr(id_priv); port = htons(bind_list->port); @@ -3370,6 +3378,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps, struct rdma_bind_list *bind_list; int ret; + lockdep_assert_held(&lock); + bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); if (!bind_list) return -ENOMEM; @@ -3396,6 +3406,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list, struct sockaddr *saddr = cma_src_addr(id_priv); __be16 dport = cma_port(daddr); + lockdep_assert_held(&lock); + hlist_for_each_entry(cur_id, &bind_list->owners, node) { struct sockaddr *cur_daddr = cma_dst_addr(cur_id); struct sockaddr *cur_saddr = cma_src_addr(cur_id); @@ -3435,6 +3447,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps, unsigned int rover; struct net *net = id_priv->id.route.addr.dev_addr.net; + lockdep_assert_held(&lock); + inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rover = prandom_u32() % remaining + low; @@ -3482,6 +3496,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list, struct rdma_id_private *cur_id; struct sockaddr *addr, *cur_addr; + lockdep_assert_held(&lock); + addr = cma_src_addr(id_priv); hlist_for_each_entry(cur_id, &bind_list->owners, node) { if (id_priv == cur_id) @@ -3512,6 +3528,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps, unsigned short snum; int ret; + lockdep_assert_held(&lock); + snum = ntohs(cma_port(cma_src_addr(id_priv))); if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index 2257d7f7810f..738d1faf4bba 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -202,7 +202,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp) return ret; } -static void counter_history_stat_update(const struct rdma_counter *counter) +static void counter_history_stat_update(struct rdma_counter *counter) { struct ib_device *dev = counter->device; struct rdma_port_counter *port_counter; @@ -212,6 +212,8 @@ static void counter_history_stat_update(const struct rdma_counter *counter) if (!port_counter->hstats) return; + rdma_counter_query_stats(counter); + for (i = 0; i < counter->stats->num_counters; i++) port_counter->hstats->value[i] += counter->stats->value[i]; } diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 186e0d652e8b..a09f8e3c7f3f 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -509,10 +509,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid); flush_workqueue(port_priv->wq); - ib_cancel_rmpp_recvs(mad_agent_priv); deref_mad_agent(mad_agent_priv); wait_for_completion(&mad_agent_priv->comp); + ib_cancel_rmpp_recvs(mad_agent_priv); 
ib_mad_agent_security_cleanup(&mad_agent_priv->agent); @@ -2718,6 +2718,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr))) { + kfree(mad_priv); ret = -ENOMEM; break; } diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 38de4942c682..3027cd2fb247 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -470,40 +470,46 @@ static struct ib_uobject * alloc_begin_fd_uobject(const struct uverbs_api_object *obj, struct uverbs_attr_bundle *attrs) { - const struct uverbs_obj_fd_type *fd_type = - container_of(obj->type_attrs, struct uverbs_obj_fd_type, type); + const struct uverbs_obj_fd_type *fd_type; int new_fd; - struct ib_uobject *uobj; + struct ib_uobject *uobj, *ret; struct file *filp; - if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release && - fd_type->fops->release != &uverbs_async_event_release)) - return ERR_PTR(-EINVAL); - - new_fd = get_unused_fd_flags(O_CLOEXEC); - if (new_fd < 0) - return ERR_PTR(new_fd); - uobj = alloc_uobj(attrs, obj); if (IS_ERR(uobj)) + return uobj; + + fd_type = + container_of(obj->type_attrs, struct uverbs_obj_fd_type, type); + if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release && + fd_type->fops->release != &uverbs_async_event_release)) { + ret = ERR_PTR(-EINVAL); goto err_fd; + } + + new_fd = get_unused_fd_flags(O_CLOEXEC); + if (new_fd < 0) { + ret = ERR_PTR(new_fd); + goto err_fd; + } /* Note that uverbs_uobject_fd_release() is called during abort */ filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL, fd_type->flags); if (IS_ERR(filp)) { - uverbs_uobject_put(uobj); - uobj = ERR_CAST(filp); - goto err_fd; + ret = ERR_CAST(filp); + goto err_getfile; } uobj->object = filp; uobj->id = new_fd; return uobj; -err_fd: +err_getfile: put_unused_fd(new_fd); - return uobj; +err_fd: + uverbs_uobject_put(uobj); + return ret; } struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj, diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index 08313f7c73bc..7dd082441333 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -212,6 +212,7 @@ int efa_query_device(struct ib_device *ibdev, props->max_send_sge = dev_attr->max_sq_sge; props->max_recv_sge = dev_attr->max_rq_sge; props->max_sge_rd = dev_attr->max_wr_rdma_sge; + props->max_pkeys = 1; if (udata && udata->outlen) { resp.max_sq_sge = dev_attr->max_sq_sge; diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c index 4633a0ce1a8c..2ced236e1553 100644 --- a/drivers/infiniband/hw/hfi1/debugfs.c +++ b/drivers/infiniband/hw/hfi1/debugfs.c @@ -985,15 +985,10 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf, static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target) { struct hfi1_pportdata *ppd; - int ret; ppd = private2ppd(fp); - ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0); - if (ret) /* failed - release the module */ - module_put(THIS_MODULE); - - return ret; + return acquire_chip_resource(ppd->dd, i2c_target(target), 0); } static int i2c1_debugfs_open(struct inode *in, struct file *fp) @@ -1013,7 +1008,6 @@ static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target) ppd = private2ppd(fp); release_chip_resource(ppd->dd, i2c_target(target)); - module_put(THIS_MODULE); return 0; } @@ 
-1031,18 +1025,10 @@ static int i2c2_debugfs_release(struct inode *in, struct file *fp) static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target) { struct hfi1_pportdata *ppd; - int ret; - - if (!try_module_get(THIS_MODULE)) - return -ENODEV; ppd = private2ppd(fp); - ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0); - if (ret) /* failed - release the module */ - module_put(THIS_MODULE); - - return ret; + return acquire_chip_resource(ppd->dd, i2c_target(target), 0); } static int qsfp1_debugfs_open(struct inode *in, struct file *fp) @@ -1062,7 +1048,6 @@ static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target) ppd = private2ppd(fp); release_chip_resource(ppd->dd, i2c_target(target)); - module_put(THIS_MODULE); return 0; } diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h index 07847cb72169..d580aa17ae37 100644 --- a/drivers/infiniband/hw/hfi1/iowait.h +++ b/drivers/infiniband/hw/hfi1/iowait.h @@ -399,7 +399,7 @@ static inline void iowait_get_priority(struct iowait *w) * @wait_head: the wait queue * * This function is called to insert an iowait struct into a - * wait queue after a resource (eg, sdma decriptor or pio + * wait queue after a resource (eg, sdma descriptor or pio * buffer) is run out. */ static inline void iowait_queue(bool pkts_sent, struct iowait *w, diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h index 185c9b02c974..b8c9d0a003fb 100644 --- a/drivers/infiniband/hw/hfi1/ipoib.h +++ b/drivers/infiniband/hw/hfi1/ipoib.h @@ -67,6 +67,9 @@ struct hfi1_ipoib_circ_buf { * @sde: sdma engine * @tx_list: tx request list * @sent_txreqs: count of txreqs posted to sdma + * @stops: count of stops of queue + * @ring_full: ring has been filled + * @no_desc: descriptor shortage seen * @flow: tracks when list needs to be flushed for a flow change * @q_idx: ipoib Tx queue index * @pkts_sent: indicator packets have been sent from this queue @@ -80,6 +83,9 @@ struct hfi1_ipoib_txq { struct sdma_engine *sde; struct list_head tx_list; u64 sent_txreqs; + atomic_t stops; + atomic_t ring_full; + atomic_t no_desc; union hfi1_ipoib_flow flow; u8 q_idx; bool pkts_sent; diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c index 883cb9d48022..9df292b51a05 100644 --- a/drivers/infiniband/hw/hfi1/ipoib_tx.c +++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c @@ -55,23 +55,48 @@ static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed) return sent - completed; } +static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq) +{ + return hfi1_ipoib_txreqs(txq->sent_txreqs, + atomic64_read(&txq->complete_txreqs)); +} + +static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq) +{ + if (atomic_inc_return(&txq->stops) == 1) + netif_stop_subqueue(txq->priv->netdev, txq->q_idx); +} + +static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq) +{ + if (atomic_dec_and_test(&txq->stops)) + netif_wake_subqueue(txq->priv->netdev, txq->q_idx); +} + +static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq) +{ + return min_t(uint, txq->priv->netdev->tx_queue_len, + txq->tx_ring.max_items - 1); +} + +static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq) +{ + return min_t(uint, txq->priv->netdev->tx_queue_len, + txq->tx_ring.max_items) >> 1; +} + static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq) { - if (unlikely(hfi1_ipoib_txreqs(++txq->sent_txreqs, - atomic64_read(&txq->complete_txreqs)) >= - min_t(unsigned int, 
txq->priv->netdev->tx_queue_len, - txq->tx_ring.max_items - 1))) - netif_stop_subqueue(txq->priv->netdev, txq->q_idx); + ++txq->sent_txreqs; + if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) && + !atomic_xchg(&txq->ring_full, 1)) + hfi1_ipoib_stop_txq(txq); } static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq) { struct net_device *dev = txq->priv->netdev; - /* If the queue is already running just return */ - if (likely(!__netif_subqueue_stopped(dev, txq->q_idx))) - return; - /* If shutting down just return as queue state is irrelevant */ if (unlikely(dev->reg_state != NETREG_REGISTERED)) return; @@ -86,11 +111,9 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq) * Use the minimum of the current tx_queue_len or the rings max txreqs * to protect against ring overflow. */ - if (hfi1_ipoib_txreqs(txq->sent_txreqs, - atomic64_read(&txq->complete_txreqs)) - < min_t(unsigned int, dev->tx_queue_len, - txq->tx_ring.max_items) >> 1) - netif_wake_subqueue(dev, txq->q_idx); + if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) && + atomic_xchg(&txq->ring_full, 0)) + hfi1_ipoib_wake_txq(txq); } static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget) @@ -364,11 +387,12 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev, if (unlikely(!tx)) return ERR_PTR(-ENOMEM); - /* so that we can test if the sdma decriptors are there */ + /* so that we can test if the sdma descriptors are there */ tx->txreq.num_desc = 0; tx->priv = priv; tx->txq = txp->txq; tx->skb = skb; + INIT_LIST_HEAD(&tx->txreq.list); hfi1_ipoib_build_ib_tx_headers(tx, txp); @@ -469,6 +493,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev, ret = hfi1_ipoib_submit_tx(txq, tx); if (likely(!ret)) { +tx_ok: trace_sdma_output_ibhdr(tx->priv->dd, &tx->sdma_hdr.hdr, ib_is_sc5(txp->flow.sc5)); @@ -478,20 +503,8 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev, txq->pkts_sent = false; - if (ret == -EBUSY) { - list_add_tail(&tx->txreq.list, &txq->tx_list); - - trace_sdma_output_ibhdr(tx->priv->dd, - &tx->sdma_hdr.hdr, - ib_is_sc5(txp->flow.sc5)); - hfi1_ipoib_check_queue_depth(txq); - return NETDEV_TX_OK; - } - - if (ret == -ECOMM) { - hfi1_ipoib_check_queue_depth(txq); - return NETDEV_TX_OK; - } + if (ret == -EBUSY || ret == -ECOMM) + goto tx_ok; sdma_txclean(priv->dd, &tx->txreq); dev_kfree_skb_any(skb); @@ -509,9 +522,17 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev, struct ipoib_txreq *tx; /* Has the flow change ? 
*/ - if (txq->flow.as_int != txp->flow.as_int) - (void)hfi1_ipoib_flush_tx_list(dev, txq); + if (txq->flow.as_int != txp->flow.as_int) { + int ret; + ret = hfi1_ipoib_flush_tx_list(dev, txq); + if (unlikely(ret)) { + if (ret == -EBUSY) + ++dev->stats.tx_dropped; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } tx = hfi1_ipoib_send_dma_common(dev, skb, txp); if (IS_ERR(tx)) { int ret = PTR_ERR(tx); @@ -610,10 +631,14 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde, return -EAGAIN; } - netif_stop_subqueue(txq->priv->netdev, txq->q_idx); - - if (list_empty(&txq->wait.list)) + if (list_empty(&txreq->list)) + /* came from non-list submit */ + list_add_tail(&txreq->list, &txq->tx_list); + if (list_empty(&txq->wait.list)) { + if (!atomic_xchg(&txq->no_desc, 1)) + hfi1_ipoib_stop_txq(txq); iowait_queue(pkts_sent, wait->iow, &sde->dmawait); + } write_sequnlock(&sde->waitlock); return -EBUSY; @@ -648,9 +673,9 @@ static void hfi1_ipoib_flush_txq(struct work_struct *work) struct net_device *dev = txq->priv->netdev; if (likely(dev->reg_state == NETREG_REGISTERED) && - likely(__netif_subqueue_stopped(dev, txq->q_idx)) && likely(!hfi1_ipoib_flush_tx_list(dev, txq))) - netif_wake_subqueue(dev, txq->q_idx); + if (atomic_xchg(&txq->no_desc, 0)) + hfi1_ipoib_wake_txq(txq); } int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) @@ -704,6 +729,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) txq->sde = NULL; INIT_LIST_HEAD(&txq->tx_list); atomic64_set(&txq->complete_txreqs, 0); + atomic_set(&txq->stops, 0); + atomic_set(&txq->ring_full, 0); + atomic_set(&txq->no_desc, 0); txq->q_idx = i; txq->flow.tx_queue = 0xff; txq->flow.sc5 = 0xff; @@ -769,7 +797,7 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq) atomic64_inc(complete_txreqs); } - if (hfi1_ipoib_txreqs(txq->sent_txreqs, atomic64_read(complete_txreqs))) + if (hfi1_ipoib_used(txq)) dd_dev_warn(txq->priv->dd, "txq %d not empty found %llu requests\n", txq->q_idx, diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c index 63688e85e8da..6d263c9749b3 100644 --- a/drivers/infiniband/hw/hfi1/netdev_rx.c +++ b/drivers/infiniband/hw/hfi1/netdev_rx.c @@ -373,7 +373,7 @@ void hfi1_netdev_free(struct hfi1_devdata *dd) { if (dd->dummy_netdev) { dd_dev_info(dd, "hfi1 netdev freed\n"); - free_netdev(dd->dummy_netdev); + kfree(dd->dummy_netdev); dd->dummy_netdev = NULL; } } diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index bfa6e081cb56..d2d526c5a756 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h @@ -91,7 +91,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, tx->mr = NULL; tx->sde = priv->s_sde; tx->psc = priv->s_sendcontext; - /* so that we can test if the sdma decriptors are there */ + /* so that we can test if the sdma descriptors are there */ tx->txreq.num_desc = 0; /* Set the header type */ tx->phdr.hdr.hdr_type = priv->hdr_type; diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index a77fa6730b2d..479fa557993e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -898,13 +898,14 @@ struct hns_roce_hw { int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr); void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port, enum ib_mtu mtu); - int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr, - unsigned long mtpt_idx); + int 
(*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf, + struct hns_roce_mr *mr, unsigned long mtpt_idx); int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, int flags, u32 pdn, int mr_access_flags, u64 iova, u64 size, void *mb_buf); - int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr); + int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf, + struct hns_roce_mr *mr); int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw); void (*write_cqc)(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index d02207cd30df..cf39f560b800 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1756,10 +1756,10 @@ static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port, val); } -static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, +static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf, + struct hns_roce_mr *mr, unsigned long mtpt_idx) { - struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device); u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 }; struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_v1_mpt_entry *mpt_entry; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index c597d7281629..dd01a51816cc 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -910,7 +910,7 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev) instance_stage = handle->rinfo.instance_state; reset_stage = handle->rinfo.reset_state; reset_cnt = ops->ae_dev_reset_cnt(handle); - hw_resetting = ops->get_hw_reset_stat(handle); + hw_resetting = ops->get_cmdq_stat(handle); sw_resetting = ops->ae_dev_resetting(handle); if (reset_cnt != hr_dev->reset_cnt) @@ -2529,10 +2529,10 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, return hns_roce_cmq_send(hr_dev, &desc, 1); } -static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry, +static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, + struct hns_roce_v2_mpt_entry *mpt_entry, struct hns_roce_mr *mr) { - struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device); u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 }; struct ib_device *ibdev = &hr_dev->ib_dev; dma_addr_t pbl_ba; @@ -2571,7 +2571,8 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry, return 0; } -static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, +static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, + void *mb_buf, struct hns_roce_mr *mr, unsigned long mtpt_idx) { struct hns_roce_v2_mpt_entry *mpt_entry; @@ -2620,7 +2621,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, if (mr->type == MR_TYPE_DMA) return 0; - ret = set_mtpt_pbl(mpt_entry, mr); + ret = set_mtpt_pbl(hr_dev, mpt_entry, mr); return ret; } @@ -2666,15 +2667,15 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, mr->iova = iova; mr->size = size; - ret = set_mtpt_pbl(mpt_entry, mr); + ret = set_mtpt_pbl(hr_dev, mpt_entry, mr); } return ret; } -static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr) +static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev, + void *mb_buf, struct hns_roce_mr *mr) { - struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device); struct ib_device *ibdev = &hr_dev->ib_dev; struct 
hns_roce_v2_mpt_entry *mpt_entry; dma_addr_t pbl_ba = 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 4c0bbb12770d..0e71ebee9e52 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -180,9 +180,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, } if (mr->type != MR_TYPE_FRMR) - ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx); + ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr, + mtpt_idx); else - ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr); + ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr); if (ret) { dev_err(dev, "Write mtpt fail!\n"); goto err_page; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 81bf6b975e0e..f939c9b769f0 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1862,7 +1862,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (!in) return -ENOMEM; - if (MLX5_CAP_GEN(mdev, ece_support)) + if (MLX5_CAP_GEN(mdev, ece_support) && ucmd) MLX5_SET(create_qp_in, in, ece, ucmd->ece_options); qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); @@ -2341,18 +2341,18 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, unsigned long flags; int err; - if (qp->ibqp.rwq_ind_tbl) { + if (qp->is_rss) { destroy_rss_raw_qp_tir(dev, qp); return; } - base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET || + base = (qp->type == IB_QPT_RAW_PACKET || qp->flags & IB_QP_CREATE_SOURCE_QPN) ? - &qp->raw_packet_qp.rq.base : - &qp->trans_qp.base; + &qp->raw_packet_qp.rq.base : + &qp->trans_qp.base; if (qp->state != IB_QPS_RESET) { - if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET && + if (qp->type != IB_QPT_RAW_PACKET && !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) { err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0, NULL, &base->mqp, NULL); @@ -2368,8 +2368,8 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, base->mqp.qpn); } - get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq, - &send_cq, &recv_cq); + get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq, + &recv_cq); spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); mlx5_ib_lock_cqs(send_cq, recv_cq); @@ -2391,7 +2391,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, mlx5_ib_unlock_cqs(send_cq, recv_cq); spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); - if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET || + if (qp->type == IB_QPT_RAW_PACKET || qp->flags & IB_QP_CREATE_SOURCE_QPN) { destroy_raw_packet_qp(dev, qp); } else { @@ -2668,6 +2668,9 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) return (create_flags) ? 
-EINVAL : 0; + process_create_flag(dev, &create_flags, + IB_QP_CREATE_INTEGRITY_EN, + MLX5_CAP_GEN(mdev, sho), qp); process_create_flag(dev, &create_flags, IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, MLX5_CAP_GEN(mdev, block_lb_mc), qp); @@ -2873,7 +2876,6 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp) static int check_ucmd_data(struct mlx5_ib_dev *dev, struct mlx5_create_qp_params *params) { - struct ib_qp_init_attr *attr = params->attr; struct ib_udata *udata = params->udata; size_t size, last; int ret; @@ -2885,14 +2887,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev, */ last = sizeof(struct mlx5_ib_create_qp_rss); else - /* IB_QPT_RAW_PACKET doesn't have ECE data */ - switch (attr->qp_type) { - case IB_QPT_RAW_PACKET: - last = offsetof(struct mlx5_ib_create_qp, ece_options); - break; - default: - last = offsetof(struct mlx5_ib_create_qp, reserved); - } + last = offsetof(struct mlx5_ib_create_qp, reserved); if (udata->inlen <= last) return 0; @@ -2907,7 +2902,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev, if (!ret) mlx5_ib_dbg( dev, - "udata is not cleared, inlen = %lu, ucmd = %lu, last = %lu, size = %lu\n", + "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n", udata->inlen, params->ucmd_size, last, size); return ret ? 0 : -EINVAL; } @@ -3002,10 +2997,18 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr, return &qp->ibqp; destroy_qp: - if (qp->type == MLX5_IB_QPT_DCT) + if (qp->type == MLX5_IB_QPT_DCT) { mlx5_ib_destroy_dct(qp); - else + } else { + /* + * The two lines below are temp solution till QP allocation + * will be moved to be under IB/core responsiblity. + */ + qp->ibqp.send_cq = attr->send_cq; + qp->ibqp.recv_cq = attr->recv_cq; destroy_qp_common(dev, qp, udata); + } + qp = NULL; free_qp: kfree(qp); @@ -4162,8 +4165,6 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (udata->outlen < min_resp_len) return -EINVAL; - resp.response_length = min_resp_len; - /* * If we don't have enough space for the ECE options, * simply indicate it with resp.response_length. 
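For context on the check_ucmd_data() change above: the driver accepts a longer-than-expected ucmd only when every trailing byte past the fields it understands is zero, which is the usual way the RDMA uAPI stays extensible. A minimal standalone sketch of that rule follows (plain C; trailing_is_cleared() is a hypothetical name for illustration, the real code uses the ib_is_udata_cleared() helper):

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    /*
     * Illustrative sketch only: reject the request unless every byte in
     * buf[last] .. buf[inlen - 1] (the part the kernel does not yet
     * understand) is zero.
     */
    static int trailing_is_cleared(const uint8_t *buf, size_t last, size_t inlen)
    {
            size_t i;

            for (i = last; i < inlen; i++)
                    if (buf[i] != 0)
                            return -EINVAL;

            return 0;
    }

With this rule, structures such as mlx5_ib_create_qp can grow new fields over time: old userspace that zero-fills the tail keeps working, while garbage in the tail is detected instead of being silently misinterpreted.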
@@ -4384,8 +4385,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev, MLX5_GET(ads, path, src_addr_index), MLX5_GET(ads, path, hop_limit), MLX5_GET(ads, path, tclass)); - memcpy(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip), - MLX5_FLD_SZ_BYTES(ads, rgid_rip)); + rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip)); } } diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c index c19d91d6dce8..7c3968ef9cd1 100644 --- a/drivers/infiniband/hw/mlx5/qpc.c +++ b/drivers/infiniband/hw/mlx5/qpc.c @@ -346,6 +346,9 @@ static int get_ece_from_mbox(void *out, u16 opcode) int ece = 0; switch (opcode) { + case MLX5_CMD_OP_INIT2INIT_QP: + ece = MLX5_GET(init2init_qp_out, out, ece); + break; case MLX5_CMD_OP_INIT2RTR_QP: ece = MLX5_GET(init2rtr_qp_out, out, ece); break; @@ -355,6 +358,9 @@ static int get_ece_from_mbox(void *out, u16 opcode) case MLX5_CMD_OP_RTS2RTS_QP: ece = MLX5_GET(rts2rts_qp_out, out, ece); break; + case MLX5_CMD_OP_RST2INIT_QP: + ece = MLX5_GET(rst2init_qp_out, out, ece); + break; default: break; } @@ -406,6 +412,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, return -ENOMEM; MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn, opt_param_mask, qpc, uid); + MLX5_SET(rst2init_qp_in, mbox->in, ece, ece); break; case MLX5_CMD_OP_INIT2RTR_QP: if (MBOX_ALLOC(mbox, init2rtr_qp)) @@ -439,6 +446,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, return -ENOMEM; MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn, opt_param_mask, qpc, uid); + MLX5_SET(init2init_qp_in, mbox->in, ece, ece); break; default: return -EINVAL; diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 792eecd206b6..97fc7dd353b0 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context, if (params->cm_info) { event.ird = params->cm_info->ird; event.ord = params->cm_info->ord; - event.private_data_len = params->cm_info->private_data_len; - event.private_data = (void *)params->cm_info->private_data; + /* Only connect_request and reply have valid private data + * the rest of the events this may be left overs from + * connection establishment. 
CONNECT_REQUEST is issued via + * qedr_iw_mpa_request + */ + if (event_type == IW_CM_EVENT_CONNECT_REPLY) { + event.private_data_len = + params->cm_info->private_data_len; + event.private_data = + (void *)params->cm_info->private_data; + } } if (ep->cm_id) diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 511b72809e14..7db35dd6ad74 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -1204,7 +1204,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, err = alloc_ud_wq_attr(qp, rdi->dparms.node); if (err) { ret = (ERR_PTR(err)); - goto bail_driver_priv; + goto bail_rq_rvt; } if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE) @@ -1314,9 +1314,11 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num); bail_rq_wq: - rvt_free_rq(&qp->r_rq); free_ud_wq_attr(qp); +bail_rq_rvt: + rvt_free_rq(&qp->r_rq); + bail_driver_priv: rdi->driver_f.qp_priv_free(rdi, qp); diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 650520244ed0..7271d705f4b0 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -139,7 +139,8 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx, break; bytes = min(bytes, len); - if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) { + if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) == + bytes) { copied += bytes; offset += bytes; len -= bytes; diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c index 654252361653..13eacf6ab431 100644 --- a/drivers/input/serio/hp_sdc.c +++ b/drivers/input/serio/hp_sdc.c @@ -1021,7 +1021,7 @@ static int __init hp_sdc_register(void) hp_sdc.base_io = (unsigned long) 0xf0428000; hp_sdc.data_io = (unsigned long) hp_sdc.base_io + 1; hp_sdc.status_io = (unsigned long) hp_sdc.base_io + 3; - if (!probe_kernel_read(&i, (unsigned char *)hp_sdc.data_io, 1)) + if (!copy_from_kernel_nofault(&i, (unsigned char *)hp_sdc.data_io, 1)) hp_sdc.dev = (void *)1; hp_sdc.dev_err = hp_sdc_init(); #endif diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index b510f67dfa49..6dc49ed8377a 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -211,7 +211,7 @@ config INTEL_IOMMU_DEBUGFS config INTEL_IOMMU_SVM bool "Support for Shared Virtual Memory with Intel IOMMU" - depends on INTEL_IOMMU && X86 + depends on INTEL_IOMMU && X86_64 select PCI_PASID select PCI_PRI select MMU_NOTIFIER diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index cc46dff98fa0..683b812c5c47 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -898,7 +898,8 @@ int __init detect_intel_iommu(void) if (!ret) ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl, &validate_drhd_cb); - if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) { + if (!ret && !no_iommu && !iommu_detected && + (!dmar_disabled || dmar_platform_optin())) { iommu_detected = 1; /* Make sure ACS will be enabled */ pci_request_acs(); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 9129663a7406..d759e7234e98 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -612,6 +612,12 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) return g_iommus[iommu_id]; } +static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu) +{ + return sm_supported(iommu) ? 
+ ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); +} + static void domain_update_iommu_coherency(struct dmar_domain *domain) { struct dmar_drhd_unit *drhd; @@ -623,7 +629,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) for_each_domain_iommu(i, domain) { found = true; - if (!ecap_coherent(g_iommus[i]->ecap)) { + if (!iommu_paging_structure_coherency(g_iommus[i])) { domain->iommu_coherency = 0; break; } @@ -634,7 +640,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) /* No hardware attached; use lowest common denominator */ rcu_read_lock(); for_each_active_iommu(iommu, drhd) { - if (!ecap_coherent(iommu->ecap)) { + if (!iommu_paging_structure_coherency(iommu)) { domain->iommu_coherency = 0; break; } @@ -921,7 +927,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; if (domain_use_first_level(domain)) - pteval |= DMA_FL_PTE_XD; + pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US; if (cmpxchg64(&pte->val, 0ULL, pteval)) /* Someone else set it while we were thinking; use theirs. */ free_pgtable_page(tmp_page); @@ -1951,7 +1957,6 @@ static inline void context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid) { context->hi |= pasid & ((1 << 20) - 1); - context->hi |= (1 << 20); } /* @@ -2095,7 +2100,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, context_set_fault_enable(context); context_set_present(context); - domain_flush_cache(domain, context, sizeof(*context)); + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(context, sizeof(*context)); /* * It's a non-present to present mapping. If hardware doesn't cache @@ -2243,7 +2249,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP); if (domain_use_first_level(domain)) - attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD; + attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US; if (!sg) { sg_res = nr_pages; @@ -2695,7 +2701,9 @@ static int __init si_domain_init(int hw) end >> agaw_to_width(si_domain->agaw))) continue; - ret = iommu_domain_identity_map(si_domain, start, end); + ret = iommu_domain_identity_map(si_domain, + mm_to_dma_pfn(start >> PAGE_SHIFT), + mm_to_dma_pfn(end >> PAGE_SHIFT)); if (ret) return ret; } @@ -6021,6 +6029,23 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain, return ret; } +/* + * Check that the device does not live on an external facing PCI port that is + * marked as untrusted. Such devices should not be able to apply quirks and + * thus not be able to bypass the IOMMU restrictions. 
+ */ +static bool risky_device(struct pci_dev *pdev) +{ + if (pdev->untrusted) { + pci_info(pdev, + "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n", + pdev->vendor, pdev->device); + pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n"); + return true; + } + return false; +} + const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, @@ -6060,6 +6085,9 @@ const struct iommu_ops intel_iommu_ops = { static void quirk_iommu_igfx(struct pci_dev *dev) { + if (risky_device(dev)) + return; + pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); dmar_map_gfx = 0; } @@ -6101,6 +6129,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx); static void quirk_iommu_rwbf(struct pci_dev *dev) { + if (risky_device(dev)) + return; + /* * Mobile 4 Series Chipset neglects to set RWBF capability, * but needs it. Same seems to hold for the desktop versions. @@ -6131,6 +6162,9 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev) { unsigned short ggc; + if (risky_device(dev)) + return; + if (pci_read_config_word(dev, GGC, &ggc)) return; @@ -6164,6 +6198,12 @@ static void __init check_tylersburg_isoch(void) pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL); if (!pdev) return; + + if (risky_device(pdev)) { + pci_dev_put(pdev); + return; + } + pci_dev_put(pdev); /* System Management Registers. Might be hidden, in which case @@ -6173,6 +6213,11 @@ static void __init check_tylersburg_isoch(void) if (!pdev) return; + if (risky_device(pdev)) { + pci_dev_put(pdev); + return; + } + if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) { pci_dev_put(pdev); return; diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 39de94edd73a..6548a601edf0 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1389,7 +1389,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, if (__set_blocks(n1, n1->keys + n2->keys, block_bytes(b->c)) > btree_blocks(new_nodes[i])) - goto out_nocoalesce; + goto out_unlock_nocoalesce; keys = n2->keys; /* Take the key of the node we're getting rid of */ @@ -1418,7 +1418,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, if (__bch_keylist_realloc(&keylist, bkey_u64s(&new_nodes[i]->key))) - goto out_nocoalesce; + goto out_unlock_nocoalesce; bch_btree_node_write(new_nodes[i], &cl); bch_keylist_add(&keylist, &new_nodes[i]->key); @@ -1464,6 +1464,10 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, /* Invalidated our iterator */ return -EINTR; +out_unlock_nocoalesce: + for (i = 0; i < nodes; i++) + mutex_unlock(&new_nodes[i]->write_lock); + out_nocoalesce: closure_sync(&cl); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index f9975c22bf7e..2014016f9a60 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -819,7 +820,8 @@ static void bcache_device_free(struct bcache_device *d) } static int bcache_device_init(struct bcache_device *d, unsigned int block_size, - sector_t sectors, make_request_fn make_request_fn) + sector_t sectors, make_request_fn make_request_fn, + struct block_device *cached_bdev) { struct request_queue *q; const size_t max_stripes = min_t(size_t, INT_MAX, @@ -885,6 +887,20 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, q->limits.io_min = block_size; q->limits.logical_block_size = block_size; 
q->limits.physical_block_size = block_size; + + if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) { + /* + * This should only happen with BCACHE_SB_VERSION_BDEV. + * Block/page size is checked for BCACHE_SB_VERSION_CDEV. + */ + pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n", + d->disk->disk_name, q->limits.logical_block_size, + PAGE_SIZE, bdev_logical_block_size(cached_bdev)); + + /* This also adjusts physical block size/min io size if needed */ + blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev)); + } + blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue); blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue); @@ -1340,7 +1356,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size) ret = bcache_device_init(&dc->disk, block_size, dc->bdev->bd_part->nr_sects - dc->sb.data_offset, - cached_dev_make_request); + cached_dev_make_request, dc->bdev); if (ret) return ret; @@ -1453,7 +1469,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) kobject_init(&d->kobj, &bch_flash_dev_ktype); if (bcache_device_init(d, block_bytes(c), u->sectors, - flash_dev_make_request)) + flash_dev_make_request, NULL)) goto err; bcache_device_attach(d, c, u - c->uuids); @@ -2364,7 +2380,7 @@ static bool bch_is_open(struct block_device *bdev) } struct async_reg_args { - struct work_struct reg_work; + struct delayed_work reg_work; char *path; struct cache_sb *sb; struct cache_sb_disk *sb_disk; @@ -2375,7 +2391,7 @@ static void register_bdev_worker(struct work_struct *work) { int fail = false; struct async_reg_args *args = - container_of(work, struct async_reg_args, reg_work); + container_of(work, struct async_reg_args, reg_work.work); struct cached_dev *dc; dc = kzalloc(sizeof(*dc), GFP_KERNEL); @@ -2405,7 +2421,7 @@ static void register_cache_worker(struct work_struct *work) { int fail = false; struct async_reg_args *args = - container_of(work, struct async_reg_args, reg_work); + container_of(work, struct async_reg_args, reg_work.work); struct cache *ca; ca = kzalloc(sizeof(*ca), GFP_KERNEL); @@ -2433,11 +2449,12 @@ static void register_cache_worker(struct work_struct *work) static void register_device_aync(struct async_reg_args *args) { if (SB_IS_BDEV(args->sb)) - INIT_WORK(&args->reg_work, register_bdev_worker); + INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker); else - INIT_WORK(&args->reg_work, register_cache_worker); + INIT_DELAYED_WORK(&args->reg_work, register_cache_worker); - queue_work(system_wq, &args->reg_work); + /* 10 jiffies is enough for a delay */ + queue_delayed_work(system_wq, &args->reg_work, 10); } static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index ac83f5002ce5..489935d5f22d 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1471,7 +1471,7 @@ static void retrieve_deps(struct dm_table *table, /* * Check we have enough space. 
*/ - needed = sizeof(*deps) + (sizeof(*deps->dev) * count); + needed = struct_size(deps, dev, count); if (len < needed) { param->flags |= DM_BUFFER_FULL_FLAG; return; diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 74f3c506f084..30505d70f423 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -282,6 +282,8 @@ static int persistent_memory_claim(struct dm_writecache *wc) while (daa-- && i < p) { pages[i++] = pfn_t_to_page(pfn); pfn.val++; + if (!(i & 15)) + cond_resched(); } } while (i < p); wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL); @@ -849,10 +851,14 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_ if (likely(!e->write_in_progress)) { if (!discarded_something) { - writecache_wait_for_ios(wc, READ); - writecache_wait_for_ios(wc, WRITE); + if (!WC_MODE_PMEM(wc)) { + writecache_wait_for_ios(wc, READ); + writecache_wait_for_ios(wc, WRITE); + } discarded_something = true; } + if (!writecache_entry_is_committed(wc, e)) + wc->uncommitted_blocks--; writecache_free_entry(wc, e); } diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 130b5a6d9f12..5cf6f5f552e0 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -1078,7 +1078,8 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb, nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1) >> zmd->zone_nr_blocks_shift; if (!nr_meta_zones || - nr_meta_zones >= zmd->nr_rnd_zones) { + (zmd->nr_devs <= 1 && nr_meta_zones >= zmd->nr_rnd_zones) || + (zmd->nr_devs > 1 && nr_meta_zones >= zmd->nr_cache_zones)) { dmz_dev_err(dev, "Invalid number of metadata blocks"); return -ENXIO; } @@ -1949,7 +1950,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd, unsigned int idx, bool idle) { struct dm_zone *dzone = NULL; - struct dm_zone *zone, *last = NULL; + struct dm_zone *zone, *maxw_z = NULL; struct list_head *zone_list; /* If we have cache zones select from the cache zone list */ @@ -1961,18 +1962,37 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd, } else zone_list = &zmd->dev[idx].map_rnd_list; + /* + * Find the buffer zone with the heaviest weight or the first (oldest) + * data zone that can be reclaimed. + */ list_for_each_entry(zone, zone_list, link) { if (dmz_is_buf(zone)) { dzone = zone->bzone; - if (dzone->dev->dev_idx != idx) + if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx) continue; - if (!last) { - last = dzone; + if (!maxw_z || maxw_z->weight < dzone->weight) + maxw_z = dzone; + } else { + dzone = zone; + if (dmz_lock_zone_reclaim(dzone)) + return dzone; + } + } + + if (maxw_z && dmz_lock_zone_reclaim(maxw_z)) + return maxw_z; + + /* + * If we come here, none of the zones inspected could be locked for + * reclaim. Try again, being more aggressive, that is, find the + * first zone that can be reclaimed regardless of its weitght. 
+ */ + list_for_each_entry(zone, zone_list, link) { + if (dmz_is_buf(zone)) { + dzone = zone->bzone; + if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx) continue; - } - if (last->weight < dzone->weight) - continue; - dzone = last; } else dzone = zone; if (dmz_lock_zone_reclaim(dzone)) @@ -2006,7 +2026,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd, unsigned int dev_idx, bool idle) { - struct dm_zone *zone; + struct dm_zone *zone = NULL; /* * Search for a zone candidate to reclaim: 2 cases are possible. @@ -2019,7 +2039,7 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd, dmz_lock_map(zmd); if (list_empty(&zmd->reserved_seq_zones_list)) zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx); - else + if (!zone) zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle); dmz_unlock_map(zmd); diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c index 2261b4dd60b7..dd1eebf6e50f 100644 --- a/drivers/md/dm-zoned-reclaim.c +++ b/drivers/md/dm-zoned-reclaim.c @@ -377,6 +377,7 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc) dmz_metadata_label(zmd), zrc->dev_idx); return -EBUSY; } + rzone = dzone; start = jiffies; if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) { @@ -391,8 +392,6 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc) */ ret = dmz_reclaim_rnd_data(zrc, dzone); } - rzone = dzone; - } else { struct dm_zone *bzone = dzone->bzone; sector_t chunk_block = 0; @@ -415,7 +414,6 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc) * be later reclaimed. */ ret = dmz_reclaim_seq_data(zrc, dzone); - rzone = dzone; } } out: diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index a907a9446c0b..cf915009c306 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -890,7 +890,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) } /* Set target (no write same support) */ - ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata) << 9; + ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata); ti->num_flush_bios = 1; ti->num_discard_bios = 1; ti->num_write_zeroes_bios = 1; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 109e81f33edb..e6807792fec8 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1009,6 +1009,7 @@ static void clone_endio(struct bio *bio) struct dm_io *io = tio->io; struct mapped_device *md = tio->io->md; dm_endio_fn endio = tio->ti->type->end_io; + struct bio *orig_bio = io->orig_bio; if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { if (bio_op(bio) == REQ_OP_DISCARD && @@ -1022,6 +1023,18 @@ static void clone_endio(struct bio *bio) disable_write_zeroes(md); } + /* + * For zone-append bios get offset in zone of the written + * sector and add that to the original bio sector pos. 
+ */ + if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) { + sector_t written_sector = bio->bi_iter.bi_sector; + struct request_queue *q = orig_bio->bi_disk->queue; + u64 mask = (u64)blk_queue_zone_sectors(q) - 1; + + orig_bio->bi_iter.bi_sector += written_sector & mask; + } + if (endio) { int r = endio(tio->ti, bio, &error); switch (r) { diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index a4ee6b86663e..b91e472ee764 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -39,8 +39,6 @@ * Troy Laramy */ -#include - #include #include #include diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c index 10c214bd0903..1ac9aef70dff 100644 --- a/drivers/media/platform/omap3isp/ispvideo.c +++ b/drivers/media/platform/omap3isp/ispvideo.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include diff --git a/drivers/media/usb/pwc/pwc.h b/drivers/media/usb/pwc/pwc.h index 3362962d0d00..b02a3c7b7742 100644 --- a/drivers/media/usb/pwc/pwc.h +++ b/drivers/media/usb/pwc/pwc.h @@ -193,7 +193,7 @@ struct pwc_raw_frame { decompressor) */ __u8 cmd[4]; /* the four byte of the command (in case of nala, only the first 3 bytes is filled) */ - __u8 rawframe[0]; /* frame_size = H / 4 * vbandlength */ + __u8 rawframe[]; /* frame_size = H / 4 * vbandlength */ } __packed; /* intermediate buffers with raw data from the USB cam */ diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index f0737c57ed5f..1491561d2e5c 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -118,8 +118,6 @@ int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state); int mptscsih_resume(struct pci_dev *pdev); #endif -#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -2422,7 +2420,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR /* Copy the sense received into the scsi command block. */ req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); - memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc)); + memcpy(sc->sense_buffer, sense_data, MPT_SENSE_BUFFER_ALLOC); /* Log SMART data (asc = 0x5D, non-IM case only) if required. 
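The clone_endio() change in drivers/md/dm.c above handles REQ_OP_ZONE_APPEND completions: the device reports the sector it actually wrote in the clone bio, and only the offset within the zone is added back to the original bio's starting sector. The mask trick relies on zone sizes being a power of two. A minimal standalone sketch of that arithmetic, using hypothetical zone and sector values rather than the kernel's bio/sector_t types:

#include <stdint.h>
#include <stdio.h>

/*
 * Fold the written sector's offset-in-zone back into the original
 * starting sector, mirroring the clone_endio() hunk above. Assumes the
 * zone size in sectors is a power of two, which makes
 * "& (zone_sectors - 1)" equivalent to "% zone_sectors".
 */
static uint64_t zone_append_result(uint64_t orig_sector,
				   uint64_t written_sector,
				   uint64_t zone_sectors)
{
	uint64_t mask = zone_sectors - 1;

	return orig_sector + (written_sector & mask);
}

int main(void)
{
	uint64_t zone_sectors = 524288;			/* hypothetical 256 MiB zones */
	uint64_t orig = 3 * zone_sectors;		/* append aimed at zone 3 */
	uint64_t written = 3 * zone_sectors + 4096;	/* landed 4096 sectors in */

	printf("completed at sector %llu\n",
	       (unsigned long long)zone_append_result(orig, written,
						      zone_sectors));
	return 0;
}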
*/ diff --git a/drivers/mfd/mt6360-core.c b/drivers/mfd/mt6360-core.c index db8cdf5272c1..e9cacc27d980 100644 --- a/drivers/mfd/mt6360-core.c +++ b/drivers/mfd/mt6360-core.c @@ -412,6 +412,7 @@ MODULE_DEVICE_TABLE(of, mt6360_pmu_of_id); static struct i2c_driver mt6360_pmu_driver = { .driver = { + .name = "mt6360_pmu", .pm = &mt6360_pmu_pm_ops, .of_match_table = of_match_ptr(mt6360_pmu_of_id), }, diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c index f82974a916c3..b0f62cbbdc87 100644 --- a/drivers/misc/habanalabs/command_submission.c +++ b/drivers/misc/habanalabs/command_submission.c @@ -62,6 +62,12 @@ static void hl_fence_release(struct dma_fence *fence) container_of(fence, struct hl_cs_compl, base_fence); struct hl_device *hdev = hl_cs_cmpl->hdev; + /* EBUSY means the CS was never submitted and hence we don't have + * an attached hw_sob object that we should handle here + */ + if (fence->error == -EBUSY) + goto free; + if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) || (hl_cs_cmpl->type == CS_TYPE_WAIT)) { @@ -92,6 +98,7 @@ static void hl_fence_release(struct dma_fence *fence) kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset); } +free: kfree_rcu(hl_cs_cmpl, base_fence.rcu); } @@ -328,10 +335,16 @@ static void cs_do_release(struct kref *ref) hl_ctx_put(cs->ctx); + /* We need to mark an error for not submitted because in that case + * the dma fence release flow is different. Mainly, we don't need + * to handle hw_sob for signal/wait + */ if (cs->timedout) dma_fence_set_error(cs->fence, -ETIMEDOUT); else if (cs->aborted) dma_fence_set_error(cs->fence, -EIO); + else if (!cs->submitted) + dma_fence_set_error(cs->fence, -EBUSY); dma_fence_signal(cs->fence); dma_fence_put(cs->fence); diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c index 3c8dcdfba20c..fc4372c18ce2 100644 --- a/drivers/misc/habanalabs/debugfs.c +++ b/drivers/misc/habanalabs/debugfs.c @@ -480,7 +480,7 @@ static int mmu_show(struct seq_file *s, void *data) return 0; } -static ssize_t mmu_write(struct file *file, const char __user *buf, +static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf, size_t count, loff_t *f_pos) { struct seq_file *s = file->private_data; @@ -1125,7 +1125,7 @@ static const struct hl_info_list hl_debugfs_list[] = { {"command_submission_jobs", command_submission_jobs_show, NULL}, {"userptr", userptr_show, NULL}, {"vm", vm_show, NULL}, - {"mmu", mmu_show, mmu_write}, + {"mmu", mmu_show, mmu_asid_va_write}, {"engines", engines_show, NULL} }; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 61f88e9884ce..834470d10b46 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -96,7 +96,7 @@ #define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE 3 -#define GAUDI_ARB_WDT_TIMEOUT 0x400000 +#define GAUDI_ARB_WDT_TIMEOUT 0x1000000 static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3", @@ -1893,6 +1893,8 @@ static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id, WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo); WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi); + WREG32(mmDMA0_QM_CP_BARRIER_CFG_0 + q_off, 0x100); + /* The following configuration is needed only once per QMAN */ if (qman_id == 0) { /* Configure RAZWI IRQ */ @@ -2725,6 +2727,12 @@ static int gaudi_mmu_init(struct hl_device *hdev) 
WREG32(mmSTLB_HOP_CONFIGURATION, hdev->mmu_huge_page_opt ? 0x30440 : 0x40440); + /* + * The H/W expects the first PI after init to be 1. After wraparound + * we'll write 0. + */ + gaudi->mmu_cache_inv_pi = 1; + gaudi->hw_cap_initialized |= HW_CAP_MMU; return 0; @@ -3790,6 +3798,25 @@ static int gaudi_validate_dma_pkt_no_mmu(struct hl_device *hdev, src_in_host); } +static int gaudi_validate_load_and_exe_pkt(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_load_and_exe *user_pkt) +{ + u32 cfg; + + cfg = le32_to_cpu(user_pkt->cfg); + + if (cfg & GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK) { + dev_err(hdev->dev, + "User not allowed to use Load and Execute\n"); + return -EPERM; + } + + parser->patched_cb_size += sizeof(struct packet_load_and_exe); + + return 0; +} + static int gaudi_validate_cb(struct hl_device *hdev, struct hl_cs_parser *parser, bool is_mmu) { @@ -3838,6 +3865,11 @@ static int gaudi_validate_cb(struct hl_device *hdev, rc = -EPERM; break; + case PACKET_LOAD_AND_EXE: + rc = gaudi_validate_load_and_exe_pkt(hdev, parser, + (struct packet_load_and_exe *) user_pkt); + break; + case PACKET_LIN_DMA: parser->contains_dma_pkt = true; if (is_mmu) @@ -3855,7 +3887,6 @@ static int gaudi_validate_cb(struct hl_device *hdev, case PACKET_FENCE: case PACKET_NOP: case PACKET_ARB_POINT: - case PACKET_LOAD_AND_EXE: parser->patched_cb_size += pkt_size; break; @@ -5994,6 +6025,8 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, mutex_lock(&hdev->mmu_cache_lock); /* L0 & L1 invalidation */ + WREG32(mmSTLB_INV_PS, 3); + WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++); WREG32(mmSTLB_INV_PS, 2); rc = hl_poll_timeout( diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h index a46530d375fa..41a8d9bff6bf 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/misc/habanalabs/gaudi/gaudiP.h @@ -229,6 +229,8 @@ struct gaudi_internal_qman_info { * @multi_msi_mode: whether we are working in multi MSI single MSI mode. * Multi MSI is possible only with IOMMU enabled. * @ext_queue_idx: helper index for external queues initialization. + * @mmu_cache_inv_pi: PI for MMU cache invalidation flow. The H/W expects an + * 8-bit value so use u8. 
*/ struct gaudi_device { int (*armcp_info_get)(struct hl_device *hdev); @@ -248,6 +250,7 @@ struct gaudi_device { u32 hw_cap_initialized; u8 multi_msi_mode; u8 ext_queue_idx; + u8 mmu_cache_inv_pi; }; void gaudi_init_security(struct hl_device *hdev); diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h index 9a5800b0086b..0f0cd067bb43 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h @@ -197,6 +197,9 @@ struct packet_wait { __le32 ctl; }; +#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_SHIFT 0 +#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK 0x00000001 + struct packet_load_and_exe { __le32 cfg; __le32 ctl; diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index bccd341e9ae1..d5d2af4d10e6 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -828,7 +828,7 @@ static void run_plant_and_detach_test(int is_early) char before[BREAK_INSTR_SIZE]; char after[BREAK_INSTR_SIZE]; - probe_kernel_read(before, (char *)kgdbts_break_test, + copy_from_kernel_nofault(before, (char *)kgdbts_break_test, BREAK_INSTR_SIZE); init_simple_test(); ts.tst = plant_and_detach_test; @@ -836,8 +836,8 @@ static void run_plant_and_detach_test(int is_early) /* Activate test with initial breakpoint */ if (!is_early) kgdb_breakpoint(); - probe_kernel_read(after, (char *)kgdbts_break_test, - BREAK_INSTR_SIZE); + copy_from_kernel_nofault(after, (char *)kgdbts_break_test, + BREAK_INSTR_SIZE); if (memcmp(before, after, BREAK_INSTR_SIZE)) { printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n"); panic("kgdb memory corruption"); diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 9392934e3a06..7becfc768bbc 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -94,6 +94,7 @@ #define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */ #define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */ +#define MEI_DEV_ID_TGP_H 0x43E0 /* Tiger Lake Point H */ #define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */ #define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */ @@ -107,6 +108,8 @@ # define PCI_CFG_HFS_1_D0I3_MSK 0x80000000 #define PCI_CFG_HFS_2 0x48 #define PCI_CFG_HFS_3 0x60 +# define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070 +# define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060 #define PCI_CFG_HFS_4 0x64 #define PCI_CFG_HFS_5 0x68 #define PCI_CFG_HFS_6 0x6C diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index f620442addf5..7649710a2ab9 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1366,7 +1366,7 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev) #define MEI_CFG_FW_NM \ .quirk_probe = mei_me_fw_type_nm -static bool mei_me_fw_type_sps(struct pci_dev *pdev) +static bool mei_me_fw_type_sps_4(struct pci_dev *pdev) { u32 reg; unsigned int devfn; @@ -1382,7 +1382,36 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev) return (reg & 0xf0000) == 0xf0000; } -#define MEI_CFG_FW_SPS \ +#define MEI_CFG_FW_SPS_4 \ + .quirk_probe = mei_me_fw_type_sps_4 + +/** + * mei_me_fw_sku_sps() - check for sps sku + * + * Read ME FW Status register to check for SPS Firmware. 
+ * The SPS FW is only signaled in pci function 0 + * + * @pdev: pci device + * + * Return: true in case of SPS firmware + */ +static bool mei_me_fw_type_sps(struct pci_dev *pdev) +{ + u32 reg; + u32 fw_type; + unsigned int devfn; + + devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); + pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, ®); + trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg); + fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK); + + dev_dbg(&pdev->dev, "fw type is %d\n", fw_type); + + return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS; +} + +#define MEI_CFG_FW_SPS \ .quirk_probe = mei_me_fw_type_sps #define MEI_CFG_FW_VER_SUPP \ @@ -1452,10 +1481,17 @@ static const struct mei_cfg mei_me_pch8_cfg = { }; /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */ -static const struct mei_cfg mei_me_pch8_sps_cfg = { +static const struct mei_cfg mei_me_pch8_sps_4_cfg = { MEI_CFG_PCH8_HFS, MEI_CFG_FW_VER_SUPP, - MEI_CFG_FW_SPS, + MEI_CFG_FW_SPS_4, +}; + +/* LBG with quirk for SPS (4.0) Firmware exclusion */ +static const struct mei_cfg mei_me_pch12_sps_4_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_FW_SPS_4, }; /* Cannon Lake and newer devices */ @@ -1465,8 +1501,18 @@ static const struct mei_cfg mei_me_pch12_cfg = { MEI_CFG_DMA_128, }; -/* LBG with quirk for SPS Firmware exclusion */ +/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */ static const struct mei_cfg mei_me_pch12_sps_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_DMA_128, + MEI_CFG_FW_SPS, +}; + +/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion + * w/o DMA support + */ +static const struct mei_cfg mei_me_pch12_nodma_sps_cfg = { MEI_CFG_PCH8_HFS, MEI_CFG_FW_VER_SUPP, MEI_CFG_FW_SPS, @@ -1480,6 +1526,15 @@ static const struct mei_cfg mei_me_pch15_cfg = { MEI_CFG_TRC, }; +/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */ +static const struct mei_cfg mei_me_pch15_sps_cfg = { + MEI_CFG_PCH8_HFS, + MEI_CFG_FW_VER_SUPP, + MEI_CFG_DMA_128, + MEI_CFG_TRC, + MEI_CFG_FW_SPS, +}; + /* * mei_cfg_list - A list of platform platform specific configurations. * Note: has to be synchronized with enum mei_cfg_idx. @@ -1492,10 +1547,13 @@ static const struct mei_cfg *const mei_cfg_list[] = { [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg, [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg, [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg, - [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg, + [MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg, [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg, + [MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg, [MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg, + [MEI_ME_PCH12_SPS_NODMA_CFG] = &mei_me_pch12_nodma_sps_cfg, [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg, + [MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg, }; const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx) diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index b6b94e211464..6a8973649c49 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2012-2019, Intel Corporation. All rights reserved. + * Copyright (c) 2012-2020, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -76,14 +76,20 @@ struct mei_me_hw { * with quirk for Node Manager exclusion. * @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer * client platforms. 
- * @MEI_ME_PCH8_SPS_CFG: Platform Controller Hub Gen8 and newer + * @MEI_ME_PCH8_SPS_4_CFG: Platform Controller Hub Gen8 and newer * servers platforms with quirk for * SPS firmware exclusion. * @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer - * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 and newer + * @MEI_ME_PCH12_SPS_4_CFG:Platform Controller Hub Gen12 up to 4.0 + * servers platforms with quirk for + * SPS firmware exclusion. + * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 5.0 and newer * servers platforms with quirk for * SPS firmware exclusion. * @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer + * @MEI_ME_PCH15_SPS_CFG: Platform Controller Hub Gen15 and newer + * servers platforms with quirk for + * SPS firmware exclusion. * @MEI_ME_NUM_CFG: Upper Sentinel. */ enum mei_cfg_idx { @@ -94,10 +100,13 @@ enum mei_cfg_idx { MEI_ME_PCH7_CFG, MEI_ME_PCH_CPT_PBG_CFG, MEI_ME_PCH8_CFG, - MEI_ME_PCH8_SPS_CFG, + MEI_ME_PCH8_SPS_4_CFG, MEI_ME_PCH12_CFG, + MEI_ME_PCH12_SPS_4_CFG, MEI_ME_PCH12_SPS_CFG, + MEI_ME_PCH12_SPS_NODMA_CFG, MEI_ME_PCH15_CFG, + MEI_ME_PCH15_SPS_CFG, MEI_ME_NUM_CFG, }; diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 71f795b510ce..2a3f2fd5df50 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -59,18 +59,18 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_4_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_4_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_4_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, @@ -84,8 +84,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, - {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_NODMA_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)}, @@ -96,6 +96,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)}, diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 
efd1a1d1f35e..3dd46cd55114 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -552,6 +552,8 @@ static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[], static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf, struct netlink_ext_ack *extack) { + memset(conf, 0, sizeof(*conf)); + if (!data[IFLA_BAREUDP_PORT]) { NL_SET_ERR_MSG(extack, "port not specified"); return -EINVAL; @@ -570,6 +572,9 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf, if (data[IFLA_BAREUDP_SRCPORT_MIN]) conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]); + if (data[IFLA_BAREUDP_MULTIPROTO_MODE]) + conf->multi_proto_mode = true; + return 0; } diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index d08a3d559114..6ad83a881039 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c @@ -146,7 +146,7 @@ struct pciefd_rx_dma { __le32 irq_status; __le32 sys_time_low; __le32 sys_time_high; - struct pucan_rx_msg msg[0]; + struct pucan_rx_msg msg[]; } __packed __aligned(4); /* Tx Link record */ @@ -194,7 +194,7 @@ struct pciefd_board { struct pci_dev *pci_dev; int can_count; spinlock_t cmd_lock; /* 64-bits cmds must be atomic */ - struct pciefd_can *can[0]; /* array of network devices */ + struct pciefd_can *can[]; /* array of network devices */ }; /* supported device ids. */ diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index c7ac63f41918..946e41f020a5 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1147,6 +1147,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) set_bit(0, priv->cfp.used); set_bit(0, priv->cfp.unique); + /* Balance of_node_put() done by of_find_node_by_name() */ + of_node_get(dn); ports = of_find_node_by_name(dn, "ports"); if (ports) { bcm_sf2_identify_ports(priv, ports); diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index bc0e47c1dbb9..177134596458 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c @@ -891,16 +891,16 @@ void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int port, mutex_lock(&ptp_data->lock); - rc = sja1105_ptpclkval_read(priv, &ticks, NULL); + rc = sja1105_ptpegr_ts_poll(ds, port, &ts); if (rc < 0) { - dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc); + dev_err(ds->dev, "timed out polling for tstamp\n"); kfree_skb(skb); goto out; } - rc = sja1105_ptpegr_ts_poll(ds, port, &ts); + rc = sja1105_ptpclkval_read(priv, &ticks, NULL); if (rc < 0) { - dev_err(ds->dev, "timed out polling for tstamp\n"); + dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc); kfree_skb(skb); goto out; } diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c index bdfd6c4e190d..af3565160db6 100644 --- a/drivers/net/dsa/sja1105/sja1105_vl.c +++ b/drivers/net/dsa/sja1105/sja1105_vl.c @@ -7,6 +7,165 @@ #define SJA1105_SIZE_VL_STATUS 8 +/* Insert into the global gate list, sorted by gate action time. 
*/ +static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg, + struct sja1105_rule *rule, + u8 gate_state, s64 entry_time, + struct netlink_ext_ack *extack) +{ + struct sja1105_gate_entry *e; + int rc; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + return -ENOMEM; + + e->rule = rule; + e->gate_state = gate_state; + e->interval = entry_time; + + if (list_empty(&gating_cfg->entries)) { + list_add(&e->list, &gating_cfg->entries); + } else { + struct sja1105_gate_entry *p; + + list_for_each_entry(p, &gating_cfg->entries, list) { + if (p->interval == e->interval) { + NL_SET_ERR_MSG_MOD(extack, + "Gate conflict"); + rc = -EBUSY; + goto err; + } + + if (e->interval < p->interval) + break; + } + list_add(&e->list, p->list.prev); + } + + gating_cfg->num_entries++; + + return 0; +err: + kfree(e); + return rc; +} + +/* The gate entries contain absolute times in their e->interval field. Convert + * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5"). + */ +static void +sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg, + u64 cycle_time) +{ + struct sja1105_gate_entry *last_e; + struct sja1105_gate_entry *e; + struct list_head *prev; + + list_for_each_entry(e, &gating_cfg->entries, list) { + struct sja1105_gate_entry *p; + + prev = e->list.prev; + + if (prev == &gating_cfg->entries) + continue; + + p = list_entry(prev, struct sja1105_gate_entry, list); + p->interval = e->interval - p->interval; + } + last_e = list_last_entry(&gating_cfg->entries, + struct sja1105_gate_entry, list); + last_e->interval = cycle_time - last_e->interval; +} + +static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg) +{ + struct sja1105_gate_entry *e, *n; + + list_for_each_entry_safe(e, n, &gating_cfg->entries, list) { + list_del(&e->list); + kfree(e); + } +} + +static int sja1105_compose_gating_subschedule(struct sja1105_private *priv, + struct netlink_ext_ack *extack) +{ + struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg; + struct sja1105_rule *rule; + s64 max_cycle_time = 0; + s64 its_base_time = 0; + int i, rc = 0; + + sja1105_free_gating_config(gating_cfg); + + list_for_each_entry(rule, &priv->flow_block.rules, list) { + if (rule->type != SJA1105_RULE_VL) + continue; + if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED) + continue; + + if (max_cycle_time < rule->vl.cycle_time) { + max_cycle_time = rule->vl.cycle_time; + its_base_time = rule->vl.base_time; + } + } + + if (!max_cycle_time) + return 0; + + dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n", + max_cycle_time, its_base_time); + + gating_cfg->base_time = its_base_time; + gating_cfg->cycle_time = max_cycle_time; + gating_cfg->num_entries = 0; + + list_for_each_entry(rule, &priv->flow_block.rules, list) { + s64 time; + s64 rbt; + + if (rule->type != SJA1105_RULE_VL) + continue; + if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED) + continue; + + /* Calculate the difference between this gating schedule's + * base time, and the base time of the gating schedule with the + * longest cycle time. We call it the relative base time (rbt). 
+ */ + rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time, + its_base_time); + rbt -= its_base_time; + + time = rbt; + + for (i = 0; i < rule->vl.num_entries; i++) { + u8 gate_state = rule->vl.entries[i].gate_state; + s64 entry_time = time; + + while (entry_time < max_cycle_time) { + rc = sja1105_insert_gate_entry(gating_cfg, rule, + gate_state, + entry_time, + extack); + if (rc) + goto err; + + entry_time += rule->vl.cycle_time; + } + time += rule->vl.entries[i].interval; + } + } + + sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time); + + return 0; +err: + sja1105_free_gating_config(gating_cfg); + return rc; +} + /* The switch flow classification core implements TTEthernet, which 'thinks' in * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7. * However it also has one other operating mode (VLLUPFORMAT=0) where it acts @@ -342,7 +501,9 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port, NL_SET_ERR_MSG_MOD(extack, "Can only redirect based on DMAC"); return -EOPNOTSUPP; - } else if (key->type != SJA1105_KEY_VLAN_AWARE_VL) { + } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT || + priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) && + key->type != SJA1105_KEY_VLAN_AWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only redirect based on {DMAC, VID, PCP}"); return -EOPNOTSUPP; @@ -388,173 +549,21 @@ int sja1105_vl_delete(struct sja1105_private *priv, int port, kfree(rule); } + rc = sja1105_compose_gating_subschedule(priv, extack); + if (rc) + return rc; + rc = sja1105_init_virtual_links(priv, extack); if (rc) return rc; + rc = sja1105_init_scheduling(priv); + if (rc < 0) + return rc; + return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS); } -/* Insert into the global gate list, sorted by gate action time. */ -static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg, - struct sja1105_rule *rule, - u8 gate_state, s64 entry_time, - struct netlink_ext_ack *extack) -{ - struct sja1105_gate_entry *e; - int rc; - - e = kzalloc(sizeof(*e), GFP_KERNEL); - if (!e) - return -ENOMEM; - - e->rule = rule; - e->gate_state = gate_state; - e->interval = entry_time; - - if (list_empty(&gating_cfg->entries)) { - list_add(&e->list, &gating_cfg->entries); - } else { - struct sja1105_gate_entry *p; - - list_for_each_entry(p, &gating_cfg->entries, list) { - if (p->interval == e->interval) { - NL_SET_ERR_MSG_MOD(extack, - "Gate conflict"); - rc = -EBUSY; - goto err; - } - - if (e->interval < p->interval) - break; - } - list_add(&e->list, p->list.prev); - } - - gating_cfg->num_entries++; - - return 0; -err: - kfree(e); - return rc; -} - -/* The gate entries contain absolute times in their e->interval field. Convert - * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5"). 
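The sja1105_gating_cfg_time_to_interval() helper being moved here converts a sorted list of absolute gate action times into per-entry durations, padding the last entry out to the cycle time, exactly as the "0, 5, 10, 15" to "5, 5, 5, 5" example in its comment says. A minimal array-based sketch of the same conversion (plain C rather than the driver's list_head version; assumes a sorted, non-empty array with all times below cycle_time):

#include <stdint.h>
#include <stdio.h>

/*
 * Turn absolute action times (0, 5, 10, 15) into the interval each
 * entry stays active (5, 5, 5, 5 for a cycle time of 20). Works in
 * place, left to right, so each subtraction still sees the original
 * absolute value of the next entry.
 */
static void times_to_intervals(int64_t *times, int n, int64_t cycle_time)
{
	int i;

	for (i = 0; i < n - 1; i++)
		times[i] = times[i + 1] - times[i];
	times[n - 1] = cycle_time - times[n - 1];
}

int main(void)
{
	int64_t t[] = { 0, 5, 10, 15 };
	int i;

	times_to_intervals(t, 4, 20);
	for (i = 0; i < 4; i++)
		printf("%lld ", (long long)t[i]);	/* prints: 5 5 5 5 */
	printf("\n");
	return 0;
}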
- */ -static void -sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg, - u64 cycle_time) -{ - struct sja1105_gate_entry *last_e; - struct sja1105_gate_entry *e; - struct list_head *prev; - - list_for_each_entry(e, &gating_cfg->entries, list) { - struct sja1105_gate_entry *p; - - prev = e->list.prev; - - if (prev == &gating_cfg->entries) - continue; - - p = list_entry(prev, struct sja1105_gate_entry, list); - p->interval = e->interval - p->interval; - } - last_e = list_last_entry(&gating_cfg->entries, - struct sja1105_gate_entry, list); - if (last_e->list.prev != &gating_cfg->entries) - last_e->interval = cycle_time - last_e->interval; -} - -static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg) -{ - struct sja1105_gate_entry *e, *n; - - list_for_each_entry_safe(e, n, &gating_cfg->entries, list) { - list_del(&e->list); - kfree(e); - } -} - -static int sja1105_compose_gating_subschedule(struct sja1105_private *priv, - struct netlink_ext_ack *extack) -{ - struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg; - struct sja1105_rule *rule; - s64 max_cycle_time = 0; - s64 its_base_time = 0; - int i, rc = 0; - - list_for_each_entry(rule, &priv->flow_block.rules, list) { - if (rule->type != SJA1105_RULE_VL) - continue; - if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED) - continue; - - if (max_cycle_time < rule->vl.cycle_time) { - max_cycle_time = rule->vl.cycle_time; - its_base_time = rule->vl.base_time; - } - } - - if (!max_cycle_time) - return 0; - - dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n", - max_cycle_time, its_base_time); - - sja1105_free_gating_config(gating_cfg); - - gating_cfg->base_time = its_base_time; - gating_cfg->cycle_time = max_cycle_time; - gating_cfg->num_entries = 0; - - list_for_each_entry(rule, &priv->flow_block.rules, list) { - s64 time; - s64 rbt; - - if (rule->type != SJA1105_RULE_VL) - continue; - if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED) - continue; - - /* Calculate the difference between this gating schedule's - * base time, and the base time of the gating schedule with the - * longest cycle time. We call it the relative base time (rbt). 
- */ - rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time, - its_base_time); - rbt -= its_base_time; - - time = rbt; - - for (i = 0; i < rule->vl.num_entries; i++) { - u8 gate_state = rule->vl.entries[i].gate_state; - s64 entry_time = time; - - while (entry_time < max_cycle_time) { - rc = sja1105_insert_gate_entry(gating_cfg, rule, - gate_state, - entry_time, - extack); - if (rc) - goto err; - - entry_time += rule->vl.cycle_time; - } - time += rule->vl.entries[i].interval; - } - } - - sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time); - - return 0; -err: - sja1105_free_gating_config(gating_cfg); - return rc; -} - int sja1105_vl_gate(struct sja1105_private *priv, int port, struct netlink_ext_ack *extack, unsigned long cookie, struct sja1105_key *key, u32 index, s32 prio, @@ -588,14 +597,12 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port, if (priv->vlan_state == SJA1105_VLAN_UNAWARE && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { - dev_err(priv->ds->dev, "1: vlan state %d key type %d\n", - priv->vlan_state, key->type); NL_SET_ERR_MSG_MOD(extack, "Can only gate based on DMAC"); return -EOPNOTSUPP; - } else if (key->type != SJA1105_KEY_VLAN_AWARE_VL) { - dev_err(priv->ds->dev, "2: vlan state %d key type %d\n", - priv->vlan_state, key->type); + } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT || + priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) && + key->type != SJA1105_KEY_VLAN_AWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only gate based on {DMAC, VID, PCP}"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index b9b4edb913c1..9b7f1af5f574 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1249,8 +1249,12 @@ static int __alx_open(struct alx_priv *alx, bool resume) static void __alx_stop(struct alx_priv *alx) { - alx_halt(alx); alx_free_irq(alx); + + cancel_work_sync(&alx->link_check_wk); + cancel_work_sync(&alx->reset_wk); + + alx_halt(alx); alx_free_rings(alx); alx_free_napis(alx); } @@ -1855,9 +1859,6 @@ static void alx_remove(struct pci_dev *pdev) struct alx_priv *alx = pci_get_drvdata(pdev); struct alx_hw *hw = &alx->hw; - cancel_work_sync(&alx->link_check_wk); - cancel_work_sync(&alx->reset_wk); - /* restore permanent mac address */ alx_set_macaddr(hw, hw->perm_addr); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c62589c266b2..6a884df44612 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6292,6 +6292,7 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) { + struct hwrm_stat_ctx_clr_stats_input req0 = {0}; struct hwrm_stat_ctx_free_input req = {0}; int i; @@ -6301,6 +6302,7 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) if (BNXT_CHIP_TYPE_NITRO_A0(bp)) return; + bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); mutex_lock(&bp->hwrm_cmd_lock); @@ -6310,7 +6312,11 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); - + if (BNXT_FW_MAJ(bp) <= 20) { + req0.stat_ctx_id = req.stat_ctx_id; + _hwrm_send_message(bp, &req0, sizeof(req0), + HWRM_CMD_TIMEOUT); + } _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -6976,7 +6982,8 @@ static int __bnxt_hwrm_func_qcaps(struct 
bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; bp->tx_push_thresh = 0; - if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) + if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && + BNXT_FW_MAJ(bp) > 217) bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); @@ -7240,8 +7247,9 @@ static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) static int bnxt_hwrm_ver_get(struct bnxt *bp) { struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; + u16 fw_maj, fw_min, fw_bld, fw_rsv; u32 dev_caps_cfg, hwrm_ver; - int rc; + int rc, len; bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; mutex_lock(&bp->hwrm_cmd_lock); @@ -7273,9 +7281,22 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, resp->hwrm_intf_upd_8b); - snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d", - resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, - resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); + fw_maj = le16_to_cpu(resp->hwrm_fw_major); + if (bp->hwrm_spec_code > 0x10803 && fw_maj) { + fw_min = le16_to_cpu(resp->hwrm_fw_minor); + fw_bld = le16_to_cpu(resp->hwrm_fw_build); + fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); + len = FW_VER_STR_LEN; + } else { + fw_maj = resp->hwrm_fw_maj_8b; + fw_min = resp->hwrm_fw_min_8b; + fw_bld = resp->hwrm_fw_bld_8b; + fw_rsv = resp->hwrm_fw_rsvd_8b; + len = BC_HWRM_STR_LEN; + } + bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); + snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, + fw_rsv); if (strlen(resp->active_pkg_name)) { int fw_ver_len = strlen(bp->fw_ver_str); @@ -10037,7 +10058,7 @@ static void bnxt_timer(struct timer_list *t) struct bnxt *bp = from_timer(bp, t, timer); struct net_device *dev = bp->dev; - if (!netif_running(dev)) + if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) return; if (atomic_read(&bp->intr_sem) != 0) @@ -11892,7 +11913,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->ethtool_ops = &bnxt_ethtool_ops; pci_set_drvdata(pdev, dev); - bnxt_vpd_read_info(bp); + if (BNXT_PF(bp)) + bnxt_vpd_read_info(bp); rc = bnxt_alloc_hwrm_resources(bp); if (rc) @@ -12133,19 +12155,9 @@ static int bnxt_resume(struct device *device) goto resume_exit; } - if (bnxt_hwrm_queue_qportcfg(bp)) { - rc = -ENODEV; + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) goto resume_exit; - } - - if (bp->hwrm_spec_code >= 0x10803) { - if (bnxt_alloc_ctx_mem(bp)) { - rc = -ENODEV; - goto resume_exit; - } - } - if (BNXT_NEW_RM(bp)) - bnxt_hwrm_func_resc_qcaps(bp, false); if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { rc = -ENODEV; @@ -12161,6 +12173,8 @@ static int bnxt_resume(struct device *device) resume_exit: bnxt_ulp_start(bp, rc); + if (!rc) + bnxt_reenable_sriov(bp); rtnl_unlock(); return rc; } @@ -12204,6 +12218,9 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, bnxt_close(netdev); pci_disable_device(pdev); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; rtnl_unlock(); /* Request a slot slot reset. 
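The bnxt_hwrm_ver_get() change above packs the four 16-bit firmware version fields into a single 64-bit code (the BNXT_FW_VER_CODE()/BNXT_FW_MAJ() macros added in bnxt.h below), so a check such as "firmware major newer than 217" becomes a plain integer comparison. A standalone sketch of that packing, with hypothetical version numbers:

#include <stdint.h>
#include <stdio.h>

/*
 * Pack major/minor/build/reserved into one 64-bit version code,
 * 16 bits per field, mirroring the BNXT_FW_VER_CODE()/BNXT_FW_MAJ()
 * macros this patch introduces.
 */
static uint64_t fw_ver_code(uint16_t maj, uint16_t min, uint16_t bld,
			    uint16_t rsv)
{
	return (uint64_t)maj << 48 | (uint64_t)min << 32 |
	       (uint64_t)bld << 16 | rsv;
}

static unsigned int fw_major(uint64_t code)
{
	return code >> 48;
}

int main(void)
{
	uint64_t code = fw_ver_code(218, 0, 1, 0);	/* hypothetical 218.0.1.0 */

	printf("major %u, newer than 217.x: %s\n", fw_major(code),
	       fw_major(code) > 217 ? "yes" : "no");
	return 0;
}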
*/ @@ -12237,12 +12254,16 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) pci_set_master(pdev); err = bnxt_hwrm_func_reset(bp); - if (!err && netif_running(netdev)) - err = bnxt_open(netdev); - - if (!err) - result = PCI_ERS_RESULT_RECOVERED; + if (!err) { + err = bnxt_hwrm_func_qcaps(bp); + if (!err && netif_running(netdev)) + err = bnxt_open(netdev); + } bnxt_ulp_start(bp, err); + if (!err) { + bnxt_reenable_sriov(bp); + result = PCI_ERS_RESULT_RECOVERED; + } } if (result != PCI_ERS_RESULT_RECOVERED) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 9e173d74b72a..78e2fd63ac3d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1746,6 +1746,11 @@ struct bnxt { #define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN) char fw_ver_str[FW_VER_STR_LEN]; char hwrm_ver_supp[FW_VER_STR_LEN]; + u64 fw_ver_code; +#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \ + ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv)) +#define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48) + __be16 vxlan_port; u8 vxlan_port_cnt; __le16 vxlan_fw_dst_port_id; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 0eef4f5e4a46..4a11c1e7cc02 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1889,7 +1889,8 @@ static void bnxt_tc_setup_indr_rel(void *cb_priv) } static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp, - struct flow_block_offload *f) + struct flow_block_offload *f, void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) { struct bnxt_flower_indr_block_cb_priv *cb_priv; struct flow_block_cb *block_cb; @@ -1907,9 +1908,10 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp, cb_priv->bp = bp; list_add(&cb_priv->list, &bp->tc_indr_block_list); - block_cb = flow_block_cb_alloc(bnxt_tc_setup_indr_block_cb, - cb_priv, cb_priv, - bnxt_tc_setup_indr_rel); + block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb, + cb_priv, cb_priv, + bnxt_tc_setup_indr_rel, f, + netdev, data, bp, cleanup); if (IS_ERR(block_cb)) { list_del(&cb_priv->list); kfree(cb_priv); @@ -1930,7 +1932,7 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp, if (!block_cb) return -ENOENT; - flow_block_cb_remove(block_cb, f); + flow_indr_block_cb_remove(block_cb, f); list_del(&block_cb->driver_list); break; default: @@ -1945,14 +1947,17 @@ static bool bnxt_is_netdev_indr_offload(struct net_device *netdev) } static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv, - enum tc_setup_type type, void *type_data) + enum tc_setup_type type, void *type_data, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) { if (!bnxt_is_netdev_indr_offload(netdev)) return -EOPNOTSUPP; switch (type) { case TC_SETUP_BLOCK: - return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data); + return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data, + cleanup); default: break; } @@ -2074,7 +2079,7 @@ void bnxt_shutdown_tc(struct bnxt *bp) return; flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp, - bnxt_tc_setup_indr_block_cb); + bnxt_tc_setup_indr_rel); rhashtable_destroy(&tc_info->flow_table); rhashtable_destroy(&tc_info->l2_table); rhashtable_destroy(&tc_info->decap_l2_table); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 
ff31da0ed846..af924a8b885f 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -459,17 +459,6 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, genet_dma_ring_regs[r]); } -static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv, - u32 f_index) -{ - u32 offset; - u32 reg; - - offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); - reg = bcmgenet_hfb_reg_readl(priv, offset); - return !!(reg & (1 << (f_index % 32))); -} - static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) { u32 offset; @@ -533,19 +522,6 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, bcmgenet_hfb_reg_writel(priv, reg, offset); } -static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv) -{ - u32 f_index; - - /* First MAX_NUM_OF_FS_RULES are reserved for Rx NFC filters */ - for (f_index = MAX_NUM_OF_FS_RULES; - f_index < priv->hw_params->hfb_filter_cnt; f_index++) - if (!bcmgenet_hfb_is_filter_enabled(priv, f_index)) - return f_index; - - return -ENOMEM; -} - static int bcmgenet_hfb_validate_mask(void *mask, size_t size) { while (size) { @@ -634,8 +610,9 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, { struct ethtool_rx_flow_spec *fs = &rule->fs; int err = 0, offset = 0, f_length = 0; - u16 val_16, mask_16; u8 val_8, mask_8; + __be16 val_16; + u16 mask_16; size_t size; u32 *f_data; @@ -744,59 +721,6 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, return err; } -/* bcmgenet_hfb_add_filter - * - * Add new filter to Hardware Filter Block to match and direct Rx traffic to - * desired Rx queue. - * - * f_data is an array of unsigned 32-bit integers where each 32-bit integer - * provides filter data for 2 bytes (4 nibbles) of Rx frame: - * - * bits 31:20 - unused - * bit 19 - nibble 0 match enable - * bit 18 - nibble 1 match enable - * bit 17 - nibble 2 match enable - * bit 16 - nibble 3 match enable - * bits 15:12 - nibble 0 data - * bits 11:8 - nibble 1 data - * bits 7:4 - nibble 2 data - * bits 3:0 - nibble 3 data - * - * Example: - * In order to match: - * - Ethernet frame type = 0x0800 (IP) - * - IP version field = 4 - * - IP protocol field = 0x11 (UDP) - * - * The following filter is needed: - * u32 hfb_filter_ipv4_udp[] = { - * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000, - * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000, - * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011, - * }; - * - * To add the filter to HFB and direct the traffic to Rx queue 0, call: - * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp, - * ARRAY_SIZE(hfb_filter_ipv4_udp), 0); - */ -int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data, - u32 f_length, u32 rx_queue) -{ - int f_index; - - f_index = bcmgenet_hfb_find_unused_filter(priv); - if (f_index < 0) - return -ENOMEM; - - if (f_length > priv->hw_params->hfb_filter_size) - return -EINVAL; - - bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f_index); - bcmgenet_hfb_enable_filter(priv, f_index); - - return 0; -} - /* bcmgenet_hfb_clear * * Clear Hardware Filter Block and disable all filtering. 
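The removed bcmgenet_hfb_add_filter() comment above documents how each 32-bit HFB filter word encodes two bytes of the Rx frame: bits 19:16 are per-nibble match enables and bits 15:0 hold the four data nibbles. A standalone sketch that builds one such word (the function name and the all-nibbles mask value are illustrative, not driver API):

#include <stdint.h>
#include <stdio.h>

/*
 * Build one HFB filter word as described in the removed
 * bcmgenet_hfb_add_filter() comment: bits 19:16 enable matching of the
 * four nibbles, bits 15:8 carry the first byte and bits 7:0 the second.
 * nibble_en is a 4-bit mask; 0xF matches both bytes completely.
 */
static uint32_t hfb_filter_word(uint8_t byte0, uint8_t byte1,
				uint8_t nibble_en)
{
	return (uint32_t)(nibble_en & 0xF) << 16 |
	       (uint32_t)byte0 << 8 | byte1;
}

int main(void)
{
	/*
	 * EtherType 0x0800 (IPv4) with all nibbles matched gives
	 * 0x000F0800, the same word used in the hfb_filter_ipv4_udp[]
	 * example of that comment.
	 */
	printf("0x%08X\n", (unsigned int)hfb_filter_word(0x08, 0x00, 0xF));
	return 0;
}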
@@ -2118,11 +2042,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) goto out; } - if (skb_padto(skb, ETH_ZLEN)) { - ret = NETDEV_TX_OK; - goto out; - } - /* Retain how many bytes will be sent on the wire, without TSB inserted * by transmit checksum offload */ @@ -2169,6 +2088,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) len_stat = (size << DMA_BUFLENGTH_SHIFT) | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); + /* Note: if we ever change from DMA_TX_APPEND_CRC below we + * will need to restore software padding of "runt" packets + */ if (!i) { len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; if (skb->ip_summed == CHECKSUM_PARTIAL) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 7a3b22b35238..ebff1fc0d8ce 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -18168,8 +18168,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, rtnl_lock(); - /* We probably don't have netdev yet */ - if (!netdev || !netif_running(netdev)) + /* Could be second call or maybe we don't have netdev yet */ + if (!netdev || tp->pcierr_recovery || !netif_running(netdev)) goto done; /* We needn't recover from permanent error */ diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 5b9d7c60eebc..52582e8ed90e 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -2558,22 +2558,23 @@ static int macb_open(struct net_device *dev) err = macb_phylink_connect(bp); if (err) - goto napi_exit; + goto reset_hw; netif_tx_start_all_queues(dev); if (bp->ptp_info) bp->ptp_info->ptp_init(dev); -napi_exit: + return 0; + +reset_hw: + macb_reset_hw(bp); for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) napi_disable(&queue->napi); + macb_free_consistent(bp); pm_exit: - if (err) { - pm_runtime_put_sync(&bp->pdev->dev); - return err; - } - return 0; + pm_runtime_put_sync(&bp->pdev->dev); + return err; } static int macb_close(struct net_device *dev) @@ -3761,15 +3762,9 @@ static int macb_init(struct platform_device *pdev) static struct sifive_fu540_macb_mgmt *mgmt; -/* Initialize and start the Receiver and Transmit subsystems */ -static int at91ether_start(struct net_device *dev) +static int at91ether_alloc_coherent(struct macb *lp) { - struct macb *lp = netdev_priv(dev); struct macb_queue *q = &lp->queues[0]; - struct macb_dma_desc *desc; - dma_addr_t addr; - u32 ctl; - int i; q->rx_ring = dma_alloc_coherent(&lp->pdev->dev, (AT91ETHER_MAX_RX_DESCR * @@ -3791,6 +3786,43 @@ static int at91ether_start(struct net_device *dev) return -ENOMEM; } + return 0; +} + +static void at91ether_free_coherent(struct macb *lp) +{ + struct macb_queue *q = &lp->queues[0]; + + if (q->rx_ring) { + dma_free_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * + macb_dma_desc_get_size(lp), + q->rx_ring, q->rx_ring_dma); + q->rx_ring = NULL; + } + + if (q->rx_buffers) { + dma_free_coherent(&lp->pdev->dev, + AT91ETHER_MAX_RX_DESCR * + AT91ETHER_MAX_RBUFF_SZ, + q->rx_buffers, q->rx_buffers_dma); + q->rx_buffers = NULL; + } +} + +/* Initialize and start the Receiver and Transmit subsystems */ +static int at91ether_start(struct macb *lp) +{ + struct macb_queue *q = &lp->queues[0]; + struct macb_dma_desc *desc; + dma_addr_t addr; + u32 ctl; + int i, ret; + + ret = at91ether_alloc_coherent(lp); + if (ret) + return ret; + addr = q->rx_buffers_dma; for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { 
desc = macb_rx_desc(q, i); @@ -3812,9 +3844,39 @@ static int at91ether_start(struct net_device *dev) ctl = macb_readl(lp, NCR); macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE)); + /* Enable MAC interrupts */ + macb_writel(lp, IER, MACB_BIT(RCOMP) | + MACB_BIT(RXUBR) | + MACB_BIT(ISR_TUND) | + MACB_BIT(ISR_RLE) | + MACB_BIT(TCOMP) | + MACB_BIT(ISR_ROVR) | + MACB_BIT(HRESP)); + return 0; } +static void at91ether_stop(struct macb *lp) +{ + u32 ctl; + + /* Disable MAC interrupts */ + macb_writel(lp, IDR, MACB_BIT(RCOMP) | + MACB_BIT(RXUBR) | + MACB_BIT(ISR_TUND) | + MACB_BIT(ISR_RLE) | + MACB_BIT(TCOMP) | + MACB_BIT(ISR_ROVR) | + MACB_BIT(HRESP)); + + /* Disable Receiver and Transmitter */ + ctl = macb_readl(lp, NCR); + macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); + + /* Free resources. */ + at91ether_free_coherent(lp); +} + /* Open the ethernet interface */ static int at91ether_open(struct net_device *dev) { @@ -3834,63 +3896,36 @@ static int at91ether_open(struct net_device *dev) macb_set_hwaddr(lp); - ret = at91ether_start(dev); + ret = at91ether_start(lp); if (ret) - return ret; - - /* Enable MAC interrupts */ - macb_writel(lp, IER, MACB_BIT(RCOMP) | - MACB_BIT(RXUBR) | - MACB_BIT(ISR_TUND) | - MACB_BIT(ISR_RLE) | - MACB_BIT(TCOMP) | - MACB_BIT(ISR_ROVR) | - MACB_BIT(HRESP)); + goto pm_exit; ret = macb_phylink_connect(lp); if (ret) - return ret; + goto stop; netif_start_queue(dev); return 0; + +stop: + at91ether_stop(lp); +pm_exit: + pm_runtime_put_sync(&lp->pdev->dev); + return ret; } /* Close the interface */ static int at91ether_close(struct net_device *dev) { struct macb *lp = netdev_priv(dev); - struct macb_queue *q = &lp->queues[0]; - u32 ctl; - - /* Disable Receiver and Transmitter */ - ctl = macb_readl(lp, NCR); - macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE))); - - /* Disable MAC interrupts */ - macb_writel(lp, IDR, MACB_BIT(RCOMP) | - MACB_BIT(RXUBR) | - MACB_BIT(ISR_TUND) | - MACB_BIT(ISR_RLE) | - MACB_BIT(TCOMP) | - MACB_BIT(ISR_ROVR) | - MACB_BIT(HRESP)); netif_stop_queue(dev); phylink_stop(lp->phylink); phylink_disconnect_phy(lp->phylink); - dma_free_coherent(&lp->pdev->dev, - AT91ETHER_MAX_RX_DESCR * - macb_dma_desc_get_size(lp), - q->rx_ring, q->rx_ring_dma); - q->rx_ring = NULL; - - dma_free_coherent(&lp->pdev->dev, - AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ, - q->rx_buffers, q->rx_buffers_dma); - q->rx_buffers = NULL; + at91ether_stop(lp); return pm_runtime_put(&lp->pdev->dev); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 7b9cd69f9844..d8ab8e366818 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -1975,7 +1975,6 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, u8 mem_type[CTXT_INGRESS + 1] = { 0 }; struct cudbg_buffer temp_buff = { 0 }; struct cudbg_ch_cntxt *buff; - u64 *dst_off, *src_off; u8 *ctx_buf; u8 i, k; int rc; @@ -2044,8 +2043,11 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, } for (j = 0; j < max_ctx_qid; j++) { + __be64 *dst_off; + u64 *src_off; + src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE); - dst_off = (u64 *)buff->data; + dst_off = (__be64 *)buff->data; /* The data is stored in 64-bit cpu order. Convert it * to big endian before parsing. 
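The cudbg_collect_dump_context() fix above makes the destination pointer __be64 because, as the comment says, the context data arrives in 64-bit CPU order and must be converted to big endian before parsing. A small portable sketch of that cpu-order-to-big-endian step using plain shifts (the kernel side uses the __be64 type and byte-swap helpers such as cpu_to_be64(), not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

/*
 * Store a host-order 64-bit value as big-endian bytes, most
 * significant byte first, regardless of the host's endianness.
 */
static void put_be64(uint8_t *dst, uint64_t val)
{
	int i;

	for (i = 0; i < 8; i++)
		dst[i] = val >> (56 - 8 * i);
}

int main(void)
{
	uint8_t out[8];
	int i;

	put_be64(out, 0x0102030405060708ULL);

	for (i = 0; i < 8; i++)
		printf("%02x ", out[i]);	/* prints: 01 02 03 ... 08 */
	printf("\n");
	return 0;
}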
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h index d3c654b9989b..80c6627fe981 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h @@ -136,6 +136,9 @@ static inline __u8 bitswap_1(unsigned char val) ((val & 0x02) << 5) | ((val & 0x01) << 7); } + +extern const char * const dcb_ver_array[]; + #define CXGB4_DCB_ENABLED true #else /* !CONFIG_CHELSIO_T4_DCB */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 828499256004..b477b8842905 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2379,7 +2379,6 @@ static const struct file_operations rss_vf_config_debugfs_fops = { }; #ifdef CONFIG_CHELSIO_T4_DCB -extern char *dcb_ver_array[]; /* Data Center Briging information for each port. */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 9fd496732b2c..f27be1132d37 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -588,7 +588,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, /** * lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware * capabilities - * @et_lmm: ethtool Link Mode Mask + * @link_mode_mask: ethtool Link Mode Mask * * Translate ethtool Link Mode Mask into a Firmware Port capabilities * value. diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 796555255207..7a7f61a8cdf4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -165,6 +165,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f, unsigned int tid, bool dip, bool sip, bool dp, bool sp) { + u8 *nat_lp = (u8 *)&f->fs.nat_lport; + u8 *nat_fp = (u8 *)&f->fs.nat_fport; + if (dip) { if (f->fs.type) { set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W, @@ -236,8 +239,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f, } set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK, - (dp ? f->fs.nat_lport : 0) | - (sp ? f->fs.nat_fport << 16 : 0), 1); + (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) | + (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0), + 1); } /* Validate filter spec against configuration done on the card. */ @@ -909,6 +913,9 @@ int set_filter_wr(struct adapter *adapter, int fidx) fwr->fpm = htons(f->fs.mask.fport); if (adapter->params.filter2_wr_support) { + u8 *nat_lp = (u8 *)&f->fs.nat_lport; + u8 *nat_fp = (u8 *)&f->fs.nat_fport; + fwr->natmode_to_ulp_type = FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ? ULP_MODE_TCPDDP : @@ -916,8 +923,8 @@ int set_filter_wr(struct adapter *adapter, int fidx) FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode); memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip)); memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip)); - fwr->newlport = htons(f->fs.nat_lport); - fwr->newfport = htons(f->fs.nat_fport); + fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8); + fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8); } /* Mark the filter as "pending" and ship off the Filter Work Request. 
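Several of the cxgb4 filter fixes in this patch (the nat_lport/nat_fport byte handling above and the ntohl() mask checks below) come down to tracking which values are in CPU order and which are in network order. A small, generic sketch of moving a 16-bit port between CPU order and wire (big-endian) order without depending on host endianness; the kernel code itself relies on htons()/ntohs()/be16_to_cpu() for this:

#include <stdint.h>
#include <stdio.h>

/* Write a CPU-order port as two big-endian (network order) bytes. */
static void put_port_be(uint8_t *dst, uint16_t port)
{
	dst[0] = port >> 8;	/* most significant byte first */
	dst[1] = port & 0xff;
}

/* Read a network-order port back into CPU order. */
static uint16_t get_port_be(const uint8_t *src)
{
	return (uint16_t)(src[0] << 8 | src[1]);
}

int main(void)
{
	uint8_t wire[2];

	put_port_be(wire, 8080);
	printf("wire: %02x %02x -> cpu order: %u\n",
	       wire[0], wire[1], get_port_be(wire));
	return 0;
}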
@@ -1105,16 +1112,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family) struct in_addr *addr; addr = (struct in_addr *)ipmask; - if (addr->s_addr == 0xffffffff) + if (ntohl(addr->s_addr) == 0xffffffff) return true; } else if (family == AF_INET6) { struct in6_addr *addr6; addr6 = (struct in6_addr *)ipmask; - if (addr6->s6_addr32[0] == 0xffffffff && - addr6->s6_addr32[1] == 0xffffffff && - addr6->s6_addr32[2] == 0xffffffff && - addr6->s6_addr32[3] == 0xffffffff) + if (ntohl(addr6->s6_addr32[0]) == 0xffffffff && + ntohl(addr6->s6_addr32[1]) == 0xffffffff && + ntohl(addr6->s6_addr32[2]) == 0xffffffff && + ntohl(addr6->s6_addr32[3]) == 0xffffffff) return true; } return false; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 854b1717a70d..0329a6b52087 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -449,7 +449,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) * or -1 * @addr: the new MAC address value * @persist: whether a new MAC allocation should be persistent - * @add_smt: if true also add the address to the HW SMT + * @smt_idx: the destination to store the new SMT index. * * Modifies an MPS filter and sets it to the new MAC address if * @tcam_idx >= 0, or adds the MAC address to a new filter if @@ -1615,6 +1615,7 @@ static int tid_init(struct tid_info *t) * @stid: the server TID * @sip: local IP address to bind server to * @sport: the server's TCP port + * @vlan: the VLAN header information * @queue: queue to direct messages from this server to * * Create an IP server for the given port and address. @@ -2609,7 +2610,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, /* Clear out filter specifications */ memset(&f->fs, 0, sizeof(struct ch_filter_specification)); - f->fs.val.lport = cpu_to_be16(sport); + f->fs.val.lport = be16_to_cpu(sport); f->fs.mask.lport = ~0; val = (u8 *)&sip; if ((val[0] | val[1] | val[2] | val[3]) != 0) { @@ -5377,10 +5378,10 @@ static inline bool is_x_10g_port(const struct link_config *lc) static int cfg_queues(struct adapter *adap) { u32 avail_qsets, avail_eth_qsets, avail_uld_qsets; - u32 i, n10g = 0, qidx = 0, n1g = 0; u32 ncpus = num_online_cpus(); u32 niqflint, neq, num_ulds; struct sge *s = &adap->sge; + u32 i, n10g = 0, qidx = 0; u32 q10g = 0, q1g; /* Reduce memory usage in kdump environment, disable all offload. 
*/ @@ -5426,7 +5427,6 @@ static int cfg_queues(struct adapter *adap) if (n10g) q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g; - n1g = adap->params.nports - n10g; #ifdef CONFIG_CHELSIO_T4_DCB /* For Data Center Bridging support we need to be able to support up * to 8 Traffic Priorities; each of which will be assigned to its @@ -5444,7 +5444,8 @@ static int cfg_queues(struct adapter *adap) else q10g = max(8U, q10g); - while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g)) + while ((q10g * n10g) > + (avail_eth_qsets - (adap->params.nports - n10g) * q1g)) q10g--; #else /* !CONFIG_CHELSIO_T4_DCB */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c index f5bc996ac77d..70dbee89118e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c @@ -194,6 +194,7 @@ int cxgb4_ptp_redirect_rx_packet(struct adapter *adapter, struct port_info *pi) } /** + * cxgb4_ptp_adjfreq - Adjust frequency of PHC cycle counter * @ptp: ptp clock structure * @ppb: Desired frequency change in parts per billion * @@ -229,7 +230,7 @@ static int cxgb4_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) /** * cxgb4_ptp_fineadjtime - Shift the time of the hardware clock - * @ptp: ptp clock structure + * @adapter: board private structure * @delta: Desired change in nanoseconds * * Adjust the timer by resetting the timecounter structure. diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 4a5fa9eba0b6..59b65d4db086 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -58,10 +58,6 @@ static struct ch_tc_pedit_fields pedits[] = { PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4), PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8), PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12), - PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0), - PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0), - PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0), - PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0), }; static struct ch_tc_flower_entry *allocate_flower_entry(void) @@ -156,14 +152,14 @@ static void cxgb4_process_flow_match(struct net_device *dev, struct flow_match_ports match; flow_rule_match_ports(rule, &match); - fs->val.lport = cpu_to_be16(match.key->dst); - fs->mask.lport = cpu_to_be16(match.mask->dst); - fs->val.fport = cpu_to_be16(match.key->src); - fs->mask.fport = cpu_to_be16(match.mask->src); + fs->val.lport = be16_to_cpu(match.key->dst); + fs->mask.lport = be16_to_cpu(match.mask->dst); + fs->val.fport = be16_to_cpu(match.key->src); + fs->mask.fport = be16_to_cpu(match.mask->src); /* also initialize nat_lport/fport to same values */ - fs->nat_lport = cpu_to_be16(match.key->dst); - fs->nat_fport = cpu_to_be16(match.key->src); + fs->nat_lport = fs->val.lport; + fs->nat_fport = fs->val.fport; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { @@ -354,12 +350,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, switch (offset) { case PEDIT_TCP_SPORT_DPORT: if (~mask & PEDIT_TCP_UDP_SPORT_MASK) - offload_pedit(fs, cpu_to_be32(val) >> 16, - cpu_to_be32(mask) >> 16, - TCP_SPORT); + fs->nat_fport = val; else - offload_pedit(fs, cpu_to_be32(val), - cpu_to_be32(mask), TCP_DPORT); + fs->nat_lport = val >> 16; } fs->nat_mode = NAT_MODE_ALL; break; @@ -367,12 +360,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, switch (offset) { case 
PEDIT_UDP_SPORT_DPORT: if (~mask & PEDIT_TCP_UDP_SPORT_MASK) - offload_pedit(fs, cpu_to_be32(val) >> 16, - cpu_to_be32(mask) >> 16, - UDP_SPORT); + fs->nat_fport = val; else - offload_pedit(fs, cpu_to_be32(val), - cpu_to_be32(mask), UDP_DPORT); + fs->nat_lport = val >> 16; } fs->nat_mode = NAT_MODE_ALL; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 3f3c11e54d97..dede02505ceb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -48,7 +48,7 @@ static int fill_match_fields(struct adapter *adap, bool next_header) { unsigned int i, j; - u32 val, mask; + __be32 val, mask; int off, err; bool found; @@ -228,7 +228,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) const struct cxgb4_next_header *next; bool found = false; unsigned int i, j; - u32 val, mask; + __be32 val, mask; int off; if (t->table[link_uhtid - 1].link_handle) { @@ -242,10 +242,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) /* Try to find matches that allow jumps to next header. */ for (i = 0; next[i].jump; i++) { - if (next[i].offoff != cls->knode.sel->offoff || - next[i].shift != cls->knode.sel->offshift || - next[i].mask != cls->knode.sel->offmask || - next[i].offset != cls->knode.sel->off) + if (next[i].sel.offoff != cls->knode.sel->offoff || + next[i].sel.offshift != cls->knode.sel->offshift || + next[i].sel.offmask != cls->knode.sel->offmask || + next[i].sel.off != cls->knode.sel->off) continue; /* Found a possible candidate. Find a key that @@ -257,9 +257,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) val = cls->knode.sel->keys[j].val; mask = cls->knode.sel->keys[j].mask; - if (next[i].match_off == off && - next[i].match_val == val && - next[i].match_mask == mask) { + if (next[i].key.off == off && + next[i].key.val == val && + next[i].key.mask == mask) { found = true; break; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h index 125868c6770a..f59dd4b2ae6f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h @@ -38,12 +38,12 @@ struct cxgb4_match_field { int off; /* Offset from the beginning of the header to match */ /* Fill the value/mask pair in the spec if matched */ - int (*val)(struct ch_filter_specification *f, u32 val, u32 mask); + int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask); }; /* IPv4 match fields */ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.tos = (ntohl(val) >> 16) & 0x000000FF; f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF; @@ -52,7 +52,7 @@ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { u32 mask_val; u8 frag_val; @@ -74,7 +74,7 @@ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.proto = (ntohl(val) >> 16) & 0x000000FF; f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF; @@ -83,7 +83,7 @@ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f, } static inline int 
cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[0], &val, sizeof(u32)); memcpy(&f->mask.fip[0], &mask, sizeof(u32)); @@ -92,7 +92,7 @@ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[0], &val, sizeof(u32)); memcpy(&f->mask.lip[0], &mask, sizeof(u32)); @@ -111,7 +111,7 @@ static const struct cxgb4_match_field cxgb4_ipv4_fields[] = { /* IPv6 match fields */ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.tos = (ntohl(val) >> 20) & 0x000000FF; f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF; @@ -120,7 +120,7 @@ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.proto = (ntohl(val) >> 8) & 0x000000FF; f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF; @@ -129,7 +129,7 @@ static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[0], &val, sizeof(u32)); memcpy(&f->mask.fip[0], &mask, sizeof(u32)); @@ -138,7 +138,7 @@ static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[4], &val, sizeof(u32)); memcpy(&f->mask.fip[4], &mask, sizeof(u32)); @@ -147,7 +147,7 @@ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[8], &val, sizeof(u32)); memcpy(&f->mask.fip[8], &mask, sizeof(u32)); @@ -156,7 +156,7 @@ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.fip[12], &val, sizeof(u32)); memcpy(&f->mask.fip[12], &mask, sizeof(u32)); @@ -165,7 +165,7 @@ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[0], &val, sizeof(u32)); memcpy(&f->mask.lip[0], &mask, sizeof(u32)); @@ -174,7 +174,7 @@ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[4], &val, sizeof(u32)); memcpy(&f->mask.lip[4], &mask, sizeof(u32)); @@ -183,7 +183,7 @@ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { memcpy(&f->val.lip[8], &val, sizeof(u32)); memcpy(&f->mask.lip[8], &mask, sizeof(u32)); @@ -192,7 +192,7 @@ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f, } static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, 
__be32 mask) { memcpy(&f->val.lip[12], &val, sizeof(u32)); memcpy(&f->mask.lip[12], &mask, sizeof(u32)); @@ -216,7 +216,7 @@ static const struct cxgb4_match_field cxgb4_ipv6_fields[] = { /* TCP/UDP match */ static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f, - u32 val, u32 mask) + __be32 val, __be32 mask) { f->val.fport = ntohl(val) >> 16; f->mask.fport = ntohl(mask) >> 16; @@ -237,19 +237,13 @@ static const struct cxgb4_match_field cxgb4_udp_fields[] = { }; struct cxgb4_next_header { - unsigned int offset; /* Offset to next header */ - /* offset, shift, and mask added to offset above + /* Offset, shift, and mask added to beginning of the header * to get to next header. Useful when using a header * field's value to jump to next header such as IHL field * in IPv4 header. */ - unsigned int offoff; - u32 shift; - u32 mask; - /* match criteria to make this jump */ - unsigned int match_off; - u32 match_val; - u32 match_mask; + struct tc_u32_sel sel; + struct tc_u32_key key; /* location of jump to make */ const struct cxgb4_match_field *jump; }; @@ -258,26 +252,74 @@ struct cxgb4_next_header { * IPv4 header. */ static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = { - { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF, - .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00, - .jump = cxgb4_tcp_fields }, - { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF, - .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00, - .jump = cxgb4_udp_fields }, - { .jump = NULL } + { + /* TCP Jump */ + .sel = { + .off = 0, + .offoff = 0, + .offshift = 6, + .offmask = cpu_to_be16(0x0f00), + }, + .key = { + .off = 8, + .val = cpu_to_be32(0x00060000), + .mask = cpu_to_be32(0x00ff0000), + }, + .jump = cxgb4_tcp_fields, + }, + { + /* UDP Jump */ + .sel = { + .off = 0, + .offoff = 0, + .offshift = 6, + .offmask = cpu_to_be16(0x0f00), + }, + .key = { + .off = 8, + .val = cpu_to_be32(0x00110000), + .mask = cpu_to_be32(0x00ff0000), + }, + .jump = cxgb4_udp_fields, + }, + { .jump = NULL }, }; /* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header * to get to transport layer header. */ static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = { - { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0, - .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000, - .jump = cxgb4_tcp_fields }, - { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0, - .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000, - .jump = cxgb4_udp_fields }, - { .jump = NULL } + { + /* TCP Jump */ + .sel = { + .off = 40, + .offoff = 0, + .offshift = 0, + .offmask = 0, + }, + .key = { + .off = 4, + .val = cpu_to_be32(0x00000600), + .mask = cpu_to_be32(0x0000ff00), + }, + .jump = cxgb4_tcp_fields, + }, + { + /* UDP Jump */ + .sel = { + .off = 40, + .offoff = 0, + .offshift = 0, + .offmask = 0, + }, + .key = { + .off = 4, + .val = cpu_to_be32(0x00001100), + .mask = cpu_to_be32(0x0000ff00), + }, + .jump = cxgb4_udp_fields, + }, + { .jump = NULL }, }; struct cxgb4_link { diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 72b37a66c7d8..c4864125fe02 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -502,41 +502,20 @@ u64 cxgb4_select_ntuple(struct net_device *dev, } EXPORT_SYMBOL(cxgb4_select_ntuple); -/* - * Called when address resolution fails for an L2T entry to handle packets - * on the arpq head. 
If a packet specifies a failure handler it is invoked, - * otherwise the packet is sent to the device. - */ -static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e) -{ - struct sk_buff *skb; - - while ((skb = __skb_dequeue(&e->arpq)) != NULL) { - const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); - - spin_unlock(&e->lock); - if (cb->arp_err_handler) - cb->arp_err_handler(cb->handle, skb); - else - t4_ofld_send(adap, skb); - spin_lock(&e->lock); - } -} - /* * Called when the host's neighbor layer makes a change to some entry that is * loaded into the HW L2 table. */ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) { - struct l2t_entry *e; - struct sk_buff_head *arpq = NULL; - struct l2t_data *d = adap->l2t; unsigned int addr_len = neigh->tbl->key_len; u32 *addr = (u32 *) neigh->primary_key; - int ifidx = neigh->dev->ifindex; - int hash = addr_hash(d, addr, addr_len, ifidx); + int hash, ifidx = neigh->dev->ifindex; + struct sk_buff_head *arpq = NULL; + struct l2t_data *d = adap->l2t; + struct l2t_entry *e; + hash = addr_hash(d, addr, addr_len, ifidx); read_lock_bh(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) if (!addreq(e, addr) && e->ifindex == ifidx) { @@ -569,8 +548,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) write_l2e(adap, e, 0); } - if (arpq) - handle_failed_resolution(adap, e); + if (arpq) { + struct sk_buff *skb; + + /* Called when address resolution fails for an L2T + * entry to handle packets on the arpq head. If a + * packet specifies a failure handler it is invoked, + * otherwise the packet is sent to the device. + */ + while ((skb = __skb_dequeue(&e->arpq)) != NULL) { + const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); + + spin_unlock(&e->lock); + if (cb->arp_err_handler) + cb->arp_err_handler(cb->handle, skb); + else + t4_ofld_send(adap, skb); + spin_lock(&e->lock); + } + } spin_unlock_bh(&e->lock); } @@ -613,6 +609,7 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan, } /** + * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters * @dev: net_device pointer * @vlan: VLAN Id * @port: Associated port diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index fde93c50cfec..a1b14468d1ff 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -598,7 +598,7 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev, /** * cxgb4_sched_class_free - free a scheduling class * @dev: net_device pointer - * @e: scheduling class + * @classid: scheduling class id to free * * Frees a scheduling class if there are no users. */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 1359158652b7..32a45dc51ed7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -302,7 +302,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb) /** * free_tx_desc - reclaims Tx descriptors and their buffers - * @adapter: the adapter + * @adap: the adapter * @q: the Tx queue to reclaim descriptors from * @n: the number of descriptors to reclaim * @unmap: whether the buffers should be unmapped for DMA @@ -722,6 +722,7 @@ static inline unsigned int flits_to_desc(unsigned int n) /** * is_eth_imm - can an Ethernet packet be sent as immediate data? * @skb: the packet + * @chip_ver: chip version * * Returns whether an Ethernet packet is small enough to fit as * immediate data. 
Return value corresponds to headroom required. @@ -749,6 +750,7 @@ static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver) /** * calc_tx_flits - calculate the number of flits for a packet Tx WR * @skb: the packet + * @chip_ver: chip version * * Returns the number of flits needed for a Tx WR for the given Ethernet * packet, including the needed WR and CPL headers. @@ -804,6 +806,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb, /** * calc_tx_descs - calculate the number of Tx descriptors for a packet * @skb: the packet + * @chip_ver: chip version * * Returns the number of Tx descriptors needed for the given Ethernet * packet, including the needed WR and CPL headers. @@ -1425,12 +1428,10 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) qidx = skb_get_queue_mapping(skb); if (ptp_enabled) { - spin_lock(&adap->ptp_lock); if (!(adap->ptp_tx_skb)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; adap->ptp_tx_skb = skb_get(skb); } else { - spin_unlock(&adap->ptp_lock); goto out_free; } q = &adap->sge.ptptxq; @@ -1444,11 +1445,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) #ifdef CONFIG_CHELSIO_T4_FCOE ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl); - if (unlikely(ret == -ENOTSUPP)) { - if (ptp_enabled) - spin_unlock(&adap->ptp_lock); + if (unlikely(ret == -EOPNOTSUPP)) goto out_free; - } #endif /* CONFIG_CHELSIO_T4_FCOE */ chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); @@ -1461,8 +1459,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) dev_err(adap->pdev_dev, "%s: Tx ring %u full while queue awake!\n", dev->name, qidx); - if (ptp_enabled) - spin_unlock(&adap->ptp_lock); return NETDEV_TX_BUSY; } @@ -1481,8 +1477,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) { memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); q->mapping_err++; - if (ptp_enabled) - spin_unlock(&adap->ptp_lock); goto out_free; } @@ -1533,8 +1527,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) if (iph->version == 4) { iph->check = 0; iph->tot_len = 0; - iph->check = (u16)(~ip_fast_csum((u8 *)iph, - iph->ihl)); + iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl); } if (skb->ip_summed == CHECKSUM_PARTIAL) cntrl = hwcsum(adap->params.chip, skb); @@ -1630,8 +1623,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev) txq_advance(&q->q, ndesc); cxgb4_ring_tx_db(adap, &q->q, ndesc); - if (ptp_enabled) - spin_unlock(&adap->ptp_lock); return NETDEV_TX_OK; out_free: @@ -2377,6 +2368,16 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(qid >= pi->nqsets)) return cxgb4_ethofld_xmit(skb, dev); + if (is_ptp_enabled(skb, dev)) { + struct adapter *adap = netdev2adap(dev); + netdev_tx_t ret; + + spin_lock(&adap->ptp_lock); + ret = cxgb4_eth_xmit(skb, dev); + spin_unlock(&adap->ptp_lock); + return ret; + } + return cxgb4_eth_xmit(skb, dev); } @@ -2410,9 +2411,9 @@ static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq) /** * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc. - * @dev - netdevice - * @eotid - ETHOFLD tid to bind/unbind - * @tc - traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid + * @dev: netdevice + * @eotid: ETHOFLD tid to bind/unbind + * @tc: traffic class. 
If set to FW_SCHED_CLS_NONE, then unbinds the @eotid * * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class. * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from @@ -2691,7 +2692,6 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) /** * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion - * @adap: the adapter * @q: the queue to stop * * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting @@ -3286,7 +3286,7 @@ enum { /** * t4_systim_to_hwstamp - read hardware time stamp - * @adap: the adapter + * @adapter: the adapter * @skb: the packet * * Read Time Stamp from MPS packet and insert in skb which @@ -3313,15 +3313,16 @@ static noinline int t4_systim_to_hwstamp(struct adapter *adapter, hwtstamps = skb_hwtstamps(skb); memset(hwtstamps, 0, sizeof(*hwtstamps)); - hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data))); + hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data)); return RX_PTP_PKT_SUC; } /** * t4_rx_hststamp - Recv PTP Event Message - * @adap: the adapter + * @adapter: the adapter * @rsp: the response queue descriptor holding the RX_PKT message + * @rxq: the response queue holding the RX_PKT message * @skb: the packet * * PTP enabled and MPS packet, read HW timestamp @@ -3345,7 +3346,7 @@ static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp, /** * t4_tx_hststamp - Loopback PTP Transmit Event Message - * @adap: the adapter + * @adapter: the adapter * @skb: the packet * @dev: the ingress net device * diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c index 01c65d13fc0e..cbe72ed27b1e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/smt.c +++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c @@ -103,6 +103,7 @@ static void t4_smte_free(struct smt_entry *e) } /** + * cxgb4_smt_release - Release SMT entry * @e: smt entry to release * * Releases ref count and frees up an smt entry from SMT table @@ -231,6 +232,7 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf, } /** + * cxgb4_smt_alloc_switching - Allocates an SMT entry for switch filters. 
* @dev: net_device pointer * @smac: MAC address to add to SMT * Returns pointer to the SMT entry created diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 1c8068c02728..1aa6dc10dc0b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3163,7 +3163,7 @@ int t4_get_tp_version(struct adapter *adapter, u32 *vers) /** * t4_get_exprom_version - return the Expansion ROM version (if any) - * @adapter: the adapter + * @adap: the adapter * @vers: where to place the version * * Reads the Expansion ROM header from FLASH and returns the version @@ -5310,7 +5310,7 @@ static unsigned int t4_use_ldst(struct adapter *adap) * @cmd: TP fw ldst address space type * @vals: where the indirect register values are stored/written * @nregs: how many indirect registers to read/write - * @start_idx: index of first indirect register to read/write + * @start_index: index of first indirect register to read/write * @rw: Read (1) or Write (0) * @sleep_ok: if true we may sleep while awaiting command completion * @@ -6115,7 +6115,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]) /** * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port - * @adap: the adapter + * @adapter: the adapter * @pidx: the port index * * Computes and returns a bitmap indicating which MPS buffer groups are @@ -6252,7 +6252,7 @@ static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx) /** * t4_get_tp_ch_map - return TP ingress channels associated with a port - * @adapter: the adapter + * @adap: the adapter * @pidx: the port index * * Returns a bitmap indicating which TP Ingress Channels are associated @@ -6589,7 +6589,7 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, * @phy_addr: the PHY address * @mmd: the PHY MMD to access (0 for clause 22 PHYs) * @reg: the register to write - * @valp: value to write + * @val: value to write * * Issues a FW command through the given mailbox to write a PHY register. */ @@ -6615,7 +6615,7 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, /** * t4_sge_decode_idma_state - decode the idma state - * @adap: the adapter + * @adapter: the adapter * @state: the state idma is stuck in */ void t4_sge_decode_idma_state(struct adapter *adapter, int state) @@ -6782,7 +6782,7 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state) * t4_sge_ctxt_flush - flush the SGE context cache * @adap: the adapter * @mbox: mailbox to use for the FW command - * @ctx_type: Egress or Ingress + * @ctxt_type: Egress or Ingress * * Issues a FW command through the given mailbox to flush the * SGE context cache. @@ -6809,7 +6809,7 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type) /** * t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values - * @adap - the adapter + * @adap: the adapter * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table * @dbqtimers: SGE Doorbell Queue Timer table * @@ -7092,6 +7092,7 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) /** * t4_fw_restart - restart the firmware by taking the uP out of RESET * @adap: the adapter + * @mbox: mailbox to use for the FW command * @reset: if we want to do a RESET to restart things * * Restart firmware previously halted by t4_fw_halt(). 
On successful @@ -7630,6 +7631,8 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, * @nmac: number of MAC addresses needed (1 to 5) * @mac: the MAC addresses of the VI * @rss_size: size of RSS table slice associated with this VI + * @vivld: the destination to store the VI Valid value. + * @vin: the destination to store the VIN value. * * Allocates a virtual interface for the given physical port. If @mac is * not %NULL it contains the MAC addresses of the VI as assigned by FW. @@ -7848,7 +7851,7 @@ int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support * @adap: the adapter * @viid: the VI id - * @mac: the MAC address + * @addr: the MAC address * @mask: the mask * @vni: the VNI id for the tunnel protocol * @vni_mask: mask for the VNI id @@ -7897,11 +7900,11 @@ int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid, * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam * @adap: the adapter * @viid: the VI id - * @mac: the MAC address + * @addr: the MAC address * @mask: the mask * @idx: index at which to add this entry - * @port_id: the port index * @lookup_type: MAC address for inner (1) or outer (0) header + * @port_id: the port index * @sleep_ok: call is allowed to sleep * * Adds the mac entry at the specified index using raw mac interface. @@ -8126,7 +8129,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox, * @idx: index of existing filter for old value of MAC address, or -1 * @addr: the new MAC address value * @persist: whether a new MAC allocation should be persistent - * @add_smt: if true also add the address to the HW SMT + * @smt_idx: the destination to store the new SMT index. * * Modifies an exact-match filter and sets it to the new MAC address. * Note that in general it is not possible to modify the value of a given @@ -8448,7 +8451,6 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, /** * t4_link_down_rc_str - return a string for a Link Down Reason Code - * @adap: the adapter * @link_down_rc: Link Down Reason Code * * Returns a string representation of the Link Down Reason Code. @@ -8472,9 +8474,7 @@ static const char *t4_link_down_rc_str(unsigned char link_down_rc) return reason[link_down_rc]; } -/** - * Return the highest speed set in the port capabilities, in Mb/s. - */ +/* Return the highest speed set in the port capabilities, in Mb/s. 
*/ static unsigned int fwcap_to_speed(fw_port_cap32_t caps) { #define TEST_SPEED_RETURN(__caps_speed, __speed) \ @@ -9110,7 +9110,6 @@ static int t4_get_flash_params(struct adapter *adap) /** * t4_prep_adapter - prepare SW and HW for operation * @adapter: the adapter - * @reset: if true perform a HW reset * * Initialize adapter SW state for the various HW modules, set initial * values for some adapter tunables, take PHYs out of reset, and @@ -10395,6 +10394,7 @@ int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode, /** * t4_i2c_rd - read I2C data from adapter * @adap: the adapter + * @mbox: mailbox to use for the FW command * @port: Port number if per-port device; <0 if not * @devid: per-port device ID or absolute device ID * @offset: byte offset into device I2C space @@ -10450,7 +10450,7 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, /** * t4_set_vlan_acl - Set a VLAN id for the specified VF - * @adapter: the adapter + * @adap: the adapter * @mbox: mailbox to use for the FW command * @vf: one of the VFs instantiated by the specified PF * @vlan: The vlanid to be set diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index cec865a97464..a7641be9094f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -260,8 +260,7 @@ static int cxgb4vf_set_addr_hash(struct port_info *pi) * @tcam_idx: TCAM index of existing filter for old value of MAC address, * or -1 * @addr: the new MAC address value - * @persist: whether a new MAC allocation should be persistent - * @add_smt: if true also add the address to the HW SMT + * @persistent: whether a new MAC allocation should be persistent * * Modifies an MPS filter and sets it to the new MAC address if * @tcam_idx >= 0, or adds the MAC address to a new filter if diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index f71c973398ec..8c3d6e11a4bf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1692,7 +1692,7 @@ static inline bool is_new_response(const struct rsp_ctrl *rc, * restore_rx_bufs - put back a packet's RX buffers * @gl: the packet gather list * @fl: the SGE Free List - * @nfrags: how many fragments in @si + * @frags: how many fragments in @si * * Called when we find out that the current packet, @si, can't be * processed right away for some reason. This is a very rare event and @@ -2054,7 +2054,7 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter) /** * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues - * @data: the adapter + * @t: Rx timer * * Runs periodically from a timer to perform maintenance of SGE RX queues. * @@ -2113,7 +2113,7 @@ static void sge_rx_timer_cb(struct timer_list *t) /** * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues - * @data: the adapter + * @t: Tx timer * * Runs periodically from a timer to perform maintenance of SGE TX queues. 
* @@ -2405,6 +2405,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, * t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue * @adapter: the adapter * @txq: pointer to the new txq to be filled in + * @dev: the network device * @devq: the network TX queue associated with the new txq * @iqid: the relative ingress queue ID to which events relating to * the new txq should be directed diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 9d49ff211cc1..a31b87390b50 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -389,9 +389,7 @@ static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec) return cc_fec; } -/** - * Return the highest speed set in the port capabilities, in Mb/s. - */ +/* Return the highest speed set in the port capabilities, in Mb/s. */ static unsigned int fwcap_to_speed(fw_port_cap32_t caps) { #define TEST_SPEED_RETURN(__caps_speed, __speed) \ @@ -1467,6 +1465,7 @@ int t4vf_identify_port(struct adapter *adapter, unsigned int viid, * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it, * -1 no change + * @sleep_ok: call is allowed to sleep * * Sets Rx properties of a virtual interface. */ @@ -1906,7 +1905,7 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc) /** * t4vf_handle_get_port_info - process a FW reply message * @pi: the port info - * @rpl: start of the FW message + * @cmd: start of the FW message * * Processes a GET_PORT_INFO FW reply message. */ @@ -2137,8 +2136,6 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) return 0; } -/** - */ int t4vf_prep_adapter(struct adapter *adapter) { int err; diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 298c55786fd9..96831f49925c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1595,6 +1595,24 @@ static int enetc_set_psfp(struct net_device *ndev, int en) return 0; } +static void enetc_enable_rxvlan(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i; + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_bdr_enable_rxvlan(&priv->si->hw, i, en); +} + +static void enetc_enable_txvlan(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_bdr_enable_txvlan(&priv->si->hw, i, en); +} + int enetc_set_features(struct net_device *ndev, netdev_features_t features) { @@ -1604,6 +1622,14 @@ int enetc_set_features(struct net_device *ndev, if (changed & NETIF_F_RXHASH) enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + enetc_enable_rxvlan(ndev, + !!(features & NETIF_F_HW_VLAN_CTAG_RX)); + + if (changed & NETIF_F_HW_VLAN_CTAG_TX) + enetc_enable_txvlan(ndev, + !!(features & NETIF_F_HW_VLAN_CTAG_TX)); + if (changed & NETIF_F_HW_TC) err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC)); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 6314051bc6c1..ce0d321c0639 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -531,22 +531,22 @@ struct enetc_msg_cmd_header { /* Common H/W utility functions */ -static inline void 
enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx, - bool en) +static inline void enetc_bdr_enable_rxvlan(struct enetc_hw *hw, int idx, + bool en) { - u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR); + u32 val = enetc_rxbdr_rd(hw, idx, ENETC_RBMR); val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0); - enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val); + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, val); } -static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx, - bool en) +static inline void enetc_bdr_enable_txvlan(struct enetc_hw *hw, int idx, + bool en) { - u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR); + u32 val = enetc_txbdr_rd(hw, idx, ENETC_TBMR); val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0); - enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val); + enetc_txbdr_wr(hw, idx, ENETC_TBMR, val); } static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx, diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 824d211ec00f..4fac57dbb3c8 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -649,14 +649,6 @@ static int enetc_pf_set_features(struct net_device *ndev, netdev_features_t changed = ndev->features ^ features; struct enetc_ndev_priv *priv = netdev_priv(ndev); - if (changed & NETIF_F_HW_VLAN_CTAG_RX) - enetc_enable_rxvlan(&priv->si->hw, 0, - !!(features & NETIF_F_HW_VLAN_CTAG_RX)); - - if (changed & NETIF_F_HW_VLAN_CTAG_TX) - enetc_enable_txvlan(&priv->si->hw, 0, - !!(features & NETIF_F_HW_VLAN_CTAG_TX)); - if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { struct enetc_pf *pf = enetc_si_priv(priv->si); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index c117074c16e3..23f278e46975 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -699,7 +699,7 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data, struct net_device *ndev = ring_data->napi.dev; skb->protocol = eth_type_trans(skb, ndev); - (void)napi_gro_receive(&ring_data->napi, skb); + napi_gro_receive(&ring_data->napi, skb); } static int hns_desc_unused(struct hnae_ring *ring) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 96d36ae5049e..c5c732601e35 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1715,7 +1715,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) } netdev->min_mtu = IBMVETH_MIN_MTU; - netdev->max_mtu = ETH_MAX_MTU; + netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH; memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 1b4d04e4474b..0fd7eae25fe9 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -842,12 +842,13 @@ static int ibmvnic_login(struct net_device *netdev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); unsigned long timeout = msecs_to_jiffies(30000); int retry_count = 0; + int retries = 10; bool retry; int rc; do { retry = false; - if (retry_count > IBMVNIC_MAX_QUEUES) { + if (retry_count > retries) { netdev_warn(netdev, "Login attempts exceeded\n"); return -1; } @@ -862,11 +863,23 @@ static int ibmvnic_login(struct net_device *netdev) if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { - netdev_warn(netdev, "Login timed out\n"); - return -1; + 
netdev_warn(netdev, "Login timed out, retrying...\n"); + retry = true; + adapter->init_done_rc = 0; + retry_count++; + continue; } - if (adapter->init_done_rc == PARTIALSUCCESS) { + if (adapter->init_done_rc == ABORTED) { + netdev_warn(netdev, "Login aborted, retrying...\n"); + retry = true; + adapter->init_done_rc = 0; + retry_count++; + /* FW or device may be busy, so + * wait a bit before retrying login + */ + msleep(500); + } else if (adapter->init_done_rc == PARTIALSUCCESS) { retry_count++; release_sub_crqs(adapter, 1); @@ -1958,13 +1971,18 @@ static int do_reset(struct ibmvnic_adapter *adapter, release_sub_crqs(adapter, 1); } else { rc = ibmvnic_reset_crq(adapter); - if (!rc) + if (rc == H_CLOSED || rc == H_SUCCESS) { rc = vio_enable_interrupts(adapter->vdev); + if (rc) + netdev_err(adapter->netdev, + "Reset failed to enable interrupts. rc=%d\n", + rc); + } } if (rc) { netdev_err(adapter->netdev, - "Couldn't initialize crq. rc=%d\n", rc); + "Reset couldn't initialize crq. rc=%d\n", rc); goto out; } diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index d9fa4600f745..4b2de08137be 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -151,10 +151,8 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); static void e1000_restore_vlan(struct e1000_adapter *adapter); -#ifdef CONFIG_PM -static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); -static int e1000_resume(struct pci_dev *pdev); -#endif +static int __maybe_unused e1000_suspend(struct device *dev); +static int __maybe_unused e1000_resume(struct device *dev); static void e1000_shutdown(struct pci_dev *pdev); #ifdef CONFIG_NET_POLL_CONTROLLER @@ -179,16 +177,16 @@ static const struct pci_error_handlers e1000_err_handler = { .resume = e1000_io_resume, }; +static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume); + static struct pci_driver e1000_driver = { .name = e1000_driver_name, .id_table = e1000_pci_tbl, .probe = e1000_probe, .remove = e1000_remove, -#ifdef CONFIG_PM - /* Power Management Hooks */ - .suspend = e1000_suspend, - .resume = e1000_resume, -#endif + .driver = { + .pm = &e1000_pm_ops, + }, .shutdown = e1000_shutdown, .err_handler = &e1000_err_handler }; @@ -5060,9 +5058,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) struct e1000_hw *hw = &adapter->hw; u32 ctrl, ctrl_ext, rctl, status; u32 wufc = adapter->wol; -#ifdef CONFIG_PM - int retval = 0; -#endif netif_device_detach(netdev); @@ -5076,12 +5071,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) e1000_down(adapter); } -#ifdef CONFIG_PM - retval = pci_save_state(pdev); - if (retval) - return retval; -#endif - status = er32(STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -5142,37 +5131,26 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) return 0; } -#ifdef CONFIG_PM -static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused e1000_suspend(struct device *dev) { int retval; + struct pci_dev *pdev = to_pci_dev(dev); bool wake; retval = __e1000_shutdown(pdev, &wake); - if (retval) - return retval; + device_set_wakeup_enable(dev, wake); - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } - - return 0; + return retval; } -static int e1000_resume(struct pci_dev *pdev) +static int __maybe_unused 
e1000_resume(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 err; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_save_state(pdev); - if (adapter->need_ioport) err = pci_enable_device(pdev); else @@ -5209,7 +5187,6 @@ static int e1000_resume(struct pci_dev *pdev) return 0; } -#endif static void e1000_shutdown(struct pci_dev *pdev) { diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a279f4fa9962..6f6479ca1267 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6349,7 +6349,6 @@ static void e1000e_flush_lpic(struct pci_dev *pdev) pm_runtime_put_sync(netdev->dev.parent); } -#ifdef CONFIG_PM_SLEEP /* S0ix implementation */ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter) { @@ -6571,7 +6570,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter) mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS; ew32(CTRL_EXT, mac_data); } -#endif /* CONFIG_PM_SLEEP */ static int e1000e_pm_freeze(struct device *dev) { @@ -6611,11 +6609,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - u32 ctrl, ctrl_ext, rctl, status; - /* Runtime suspend should only enable wakeup for link changes */ - u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; + u32 ctrl, ctrl_ext, rctl, status, wufc; int retval = 0; + /* Runtime suspend should only enable wakeup for link changes */ + if (runtime) + wufc = E1000_WUFC_LNKC; + else if (device_may_wakeup(&pdev->dev)) + wufc = adapter->wol; + else + wufc = 0; + status = er32(STATUS); if (status & E1000_STATUS_LU) wufc &= ~E1000_WUFC_LNKC; @@ -6672,7 +6676,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); } else if (hw->mac.type >= e1000_pch_lpt) { - if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) + if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) /* ULP does not support wake from unicast, multicast * or broadcast. 
*/ @@ -6869,7 +6873,6 @@ static int e1000e_pm_thaw(struct device *dev) return rc; } -#ifdef CONFIG_PM static int __e1000_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -6935,8 +6938,7 @@ static int __e1000_resume(struct pci_dev *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int e1000e_pm_suspend(struct device *dev) +static __maybe_unused int e1000e_pm_suspend(struct device *dev) { struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); @@ -6960,7 +6962,7 @@ static int e1000e_pm_suspend(struct device *dev) return rc; } -static int e1000e_pm_resume(struct device *dev) +static __maybe_unused int e1000e_pm_resume(struct device *dev) { struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); @@ -6979,9 +6981,8 @@ static int e1000e_pm_resume(struct device *dev) return e1000e_pm_thaw(dev); } -#endif /* CONFIG_PM_SLEEP */ -static int e1000e_pm_runtime_idle(struct device *dev) +static __maybe_unused int e1000e_pm_runtime_idle(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct e1000_adapter *adapter = netdev_priv(netdev); @@ -6997,7 +6998,7 @@ static int e1000e_pm_runtime_idle(struct device *dev) return -EBUSY; } -static int e1000e_pm_runtime_resume(struct device *dev) +static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); @@ -7014,7 +7015,7 @@ static int e1000e_pm_runtime_resume(struct device *dev) return rc; } -static int e1000e_pm_runtime_suspend(struct device *dev) +static __maybe_unused int e1000e_pm_runtime_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); @@ -7039,7 +7040,6 @@ static int e1000e_pm_runtime_suspend(struct device *dev) return 0; } -#endif /* CONFIG_PM */ static void e1000_shutdown(struct pci_dev *pdev) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index aa8026b1eb81..67806b7b2f49 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2070,6 +2070,9 @@ static int i40e_set_ringparam(struct net_device *netdev, */ rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS; err = i40e_setup_rx_descriptors(&rx_rings[i]); + if (err) + goto rx_unwind; + err = i40e_alloc_rx_bi(&rx_rings[i]); if (err) goto rx_unwind; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 5d807c8004f8..56ecd6c3f236 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -439,11 +439,15 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, i40e_get_netdev_stats_struct_tx(ring, stats); if (i40e_enabled_xdp_vsi(vsi)) { - ring++; + ring = READ_ONCE(vsi->xdp_rings[i]); + if (!ring) + continue; i40e_get_netdev_stats_struct_tx(ring, stats); } - ring++; + ring = READ_ONCE(vsi->rx_rings[i]); + if (!ring) + continue; do { start = u64_stats_fetch_begin_irq(&ring->syncp); packets = ring->stats.packets; @@ -787,6 +791,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) for (q = 0; q < vsi->num_queue_pairs; q++) { /* locate Tx ring */ p = READ_ONCE(vsi->tx_rings[q]); + if (!p) + continue; do { start = u64_stats_fetch_begin_irq(&p->syncp); @@ -800,8 +806,11 @@ static void i40e_update_vsi_stats(struct i40e_vsi 
*vsi) tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; - /* Rx queue is part of the same block as Tx queue */ - p = &p[1]; + /* locate Rx ring */ + p = READ_ONCE(vsi->rx_rings[q]); + if (!p) + continue; + do { start = u64_stats_fetch_begin_irq(&p->syncp); packets = p->stats.packets; @@ -10824,10 +10833,10 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) if (vsi->tx_rings && vsi->tx_rings[0]) { for (i = 0; i < vsi->alloc_queue_pairs; i++) { kfree_rcu(vsi->tx_rings[i], rcu); - vsi->tx_rings[i] = NULL; - vsi->rx_rings[i] = NULL; + WRITE_ONCE(vsi->tx_rings[i], NULL); + WRITE_ONCE(vsi->rx_rings[i], NULL); if (vsi->xdp_rings) - vsi->xdp_rings[i] = NULL; + WRITE_ONCE(vsi->xdp_rings[i], NULL); } } } @@ -10861,7 +10870,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; ring->itr_setting = pf->tx_itr_default; - vsi->tx_rings[i] = ring++; + WRITE_ONCE(vsi->tx_rings[i], ring++); if (!i40e_enabled_xdp_vsi(vsi)) goto setup_rx; @@ -10879,7 +10888,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; set_ring_xdp(ring); ring->itr_setting = pf->tx_itr_default; - vsi->xdp_rings[i] = ring++; + WRITE_ONCE(vsi->xdp_rings[i], ring++); setup_rx: ring->queue_index = i; @@ -10892,7 +10901,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->size = 0; ring->dcb_tc = 0; ring->itr_setting = pf->rx_itr_default; - vsi->rx_rings[i] = ring; + WRITE_ONCE(vsi->rx_rings[i], ring); } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 28b46cc9f5cb..2e3a39cea2c0 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1194,7 +1194,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi) for (i = 0; i < vsi->alloc_txq; i++) { if (vsi->tx_rings[i]) { kfree_rcu(vsi->tx_rings[i], rcu); - vsi->tx_rings[i] = NULL; + WRITE_ONCE(vsi->tx_rings[i], NULL); } } } @@ -1202,7 +1202,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi) for (i = 0; i < vsi->alloc_rxq; i++) { if (vsi->rx_rings[i]) { kfree_rcu(vsi->rx_rings[i], rcu); - vsi->rx_rings[i] = NULL; + WRITE_ONCE(vsi->rx_rings[i], NULL); } } } @@ -1235,7 +1235,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->vsi = vsi; ring->dev = dev; ring->count = vsi->num_tx_desc; - vsi->tx_rings[i] = ring; + WRITE_ONCE(vsi->tx_rings[i], ring); } /* Allocate Rx rings */ @@ -1254,7 +1254,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->netdev = vsi->netdev; ring->dev = dev; ring->count = vsi->num_rx_desc; - vsi->rx_rings[i] = ring; + WRITE_ONCE(vsi->rx_rings[i], ring); } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 082825e3cb39..4cbd49c87568 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1702,7 +1702,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) xdp_ring->netdev = NULL; xdp_ring->dev = dev; xdp_ring->count = vsi->num_tx_desc; - vsi->xdp_rings[i] = xdp_ring; + WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); if (ice_setup_tx_ring(xdp_ring)) goto free_xdp_rings; ice_set_ring_xdp(xdp_ring); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index fd9f5d41b594..2e35c5706cf1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ 
-921,7 +921,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->queue_index = txr_idx; /* assign ring to adapter */ - adapter->tx_ring[txr_idx] = ring; + WRITE_ONCE(adapter->tx_ring[txr_idx], ring); /* update count and index */ txr_count--; @@ -948,7 +948,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, set_ring_xdp(ring); /* assign ring to adapter */ - adapter->xdp_ring[xdp_idx] = ring; + WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring); /* update count and index */ xdp_count--; @@ -991,7 +991,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->queue_index = rxr_idx; /* assign ring to adapter */ - adapter->rx_ring[rxr_idx] = ring; + WRITE_ONCE(adapter->rx_ring[rxr_idx], ring); /* update count and index */ rxr_count--; @@ -1020,13 +1020,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) ixgbe_for_each_ring(ring, q_vector->tx) { if (ring_is_xdp(ring)) - adapter->xdp_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL); else - adapter->tx_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL); } ixgbe_for_each_ring(ring, q_vector->rx) - adapter->rx_ring[ring->queue_index] = NULL; + WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL); adapter->q_vector[v_idx] = NULL; napi_hash_del(&q_vector->napi); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f162b8b8f345..97a423ecf808 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7051,7 +7051,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) } for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; + struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]); + + if (!rx_ring) + continue; non_eop_descs += rx_ring->rx_stats.non_eop_descs; alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; @@ -7072,15 +7075,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) packets = 0; /* gather some stats to the adapter struct that are per queue */ for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]); + + if (!tx_ring) + continue; restart_queue += tx_ring->tx_stats.restart_queue; tx_busy += tx_ring->tx_stats.tx_busy; bytes += tx_ring->stats.bytes; packets += tx_ring->stats.packets; } for (i = 0; i < adapter->num_xdp_queues; i++) { - struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i]; + struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]); + if (!xdp_ring) + continue; restart_queue += xdp_ring->tx_stats.restart_queue; tx_busy += xdp_ring->tx_stats.tx_busy; bytes += xdp_ring->stats.bytes; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 946925bbcb2d..c639e3a29302 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -106,9 +106,11 @@ #define MVNETA_TX_IN_PRGRS BIT(1) #define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_RX_MIN_FRAME_SIZE 0x247c +/* Only exists on Armada XP and Armada 370 */ #define MVNETA_SERDES_CFG 0x24A0 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 #define MVNETA_QSGMII_SERDES_PROTO 0x0667 +#define MVNETA_HSGMII_SERDES_PROTO 0x1107 #define MVNETA_TYPE_PRIO 0x24bc #define MVNETA_FORCE_UNI BIT(21) #define MVNETA_TXQ_CMD_1 0x24e4 @@ -3529,26 
+3531,60 @@ static int mvneta_setup_txqs(struct mvneta_port *pp) return 0; } -static int mvneta_comphy_init(struct mvneta_port *pp) +static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) { int ret; - if (!pp->comphy) - return 0; - - ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, - pp->phy_interface); + ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface); if (ret) return ret; return phy_power_on(pp->comphy); } +static int mvneta_config_interface(struct mvneta_port *pp, + phy_interface_t interface) +{ + int ret = 0; + + if (pp->comphy) { + if (interface == PHY_INTERFACE_MODE_SGMII || + interface == PHY_INTERFACE_MODE_1000BASEX || + interface == PHY_INTERFACE_MODE_2500BASEX) { + ret = mvneta_comphy_init(pp, interface); + } + } else { + switch (interface) { + case PHY_INTERFACE_MODE_QSGMII: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_QSGMII_SERDES_PROTO); + break; + + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_SGMII_SERDES_PROTO); + break; + + case PHY_INTERFACE_MODE_2500BASEX: + mvreg_write(pp, MVNETA_SERDES_CFG, + MVNETA_HSGMII_SERDES_PROTO); + break; + default: + break; + } + } + + pp->phy_interface = interface; + + return ret; +} + static void mvneta_start_dev(struct mvneta_port *pp) { int cpu; - WARN_ON(mvneta_comphy_init(pp)); + WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); mvneta_max_rx_size_set(pp, pp->pkt_size); mvneta_txq_max_tx_size_set(pp, pp->pkt_size); @@ -3926,14 +3962,10 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, if (state->speed == SPEED_2500) new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; - if (pp->comphy && pp->phy_interface != state->interface && - (state->interface == PHY_INTERFACE_MODE_SGMII || - state->interface == PHY_INTERFACE_MODE_1000BASEX || - state->interface == PHY_INTERFACE_MODE_2500BASEX)) { - pp->phy_interface = state->interface; - - WARN_ON(phy_power_off(pp->comphy)); - WARN_ON(mvneta_comphy_init(pp)); + if (pp->phy_interface != state->interface) { + if (pp->comphy) + WARN_ON(phy_power_off(pp->comphy)); + WARN_ON(mvneta_config_interface(pp, state->interface)); } if (new_ctrl0 != gmac_ctrl0) @@ -4982,12 +5014,10 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) /* MAC Cause register should be cleared */ mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); - if (phy_mode == PHY_INTERFACE_MODE_QSGMII) - mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); - else if (phy_mode == PHY_INTERFACE_MODE_SGMII || - phy_interface_mode_is_8023z(phy_mode)) - mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); - else if (!phy_interface_mode_is_rgmii(phy_mode)) + if (phy_mode != PHY_INTERFACE_MODE_QSGMII && + phy_mode != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_8023z(phy_mode) && + !phy_interface_mode_is_rgmii(phy_mode)) return -EINVAL; return 0; @@ -5176,10 +5206,10 @@ static int mvneta_probe(struct platform_device *pdev) if (err < 0) goto err_netdev; - err = mvneta_port_power_up(pp, phy_mode); + err = mvneta_port_power_up(pp, pp->phy_interface); if (err < 0) { dev_err(&pdev->dev, "can't power up port\n"); - goto err_netdev; + return err; } /* Armada3700 network controller does not support per-cpu diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 2b5dad2ec650..24f4d8e0da98 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ 
-1544,7 +1544,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port) for (q = 0; q < port->ntxqs; q++) for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) *pstats++ += mvpp2_read_index(port->priv, - MVPP22_CTRS_TX_CTR(port->id, i), + MVPP22_CTRS_TX_CTR(port->id, q), mvpp2_ethtool_txq_regs[i].offset); /* Rxqs are numbered from 0 from the user standpoint, but not from the @@ -1553,7 +1553,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port) for (q = 0; q < port->nrxqs; q++) for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) *pstats++ += mvpp2_read_index(port->priv, - port->first_rxq + i, + port->first_rxq + q, mvpp2_ethtool_rxq_regs[i].offset); } @@ -5983,8 +5983,8 @@ static int mvpp2_remove(struct platform_device *pdev) { struct mvpp2 *priv = platform_get_drvdata(pdev); struct fwnode_handle *fwnode = pdev->dev.fwnode; + int i = 0, poolnum = MVPP2_BM_POOLS_NUM; struct fwnode_handle *port_fwnode; - int i = 0; mvpp2_dbgfs_cleanup(priv); @@ -5998,7 +5998,10 @@ static int mvpp2_remove(struct platform_device *pdev) destroy_workqueue(priv->stats_queue); - for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { + if (priv->percpu_pools) + poolnum = mvpp2_get_nrxqs(priv) * 2; + + for (i = 0; i < poolnum; i++) { struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool); diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index f1ace4fec19f..3e765bdcf9e1 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -24,7 +24,6 @@ #include #include #include -#include #define MTK_STAR_DRVNAME "mtk_star_emac" @@ -262,7 +261,6 @@ struct mtk_star_priv { spinlock_t lock; struct rtnl_link_stats64 stats; - struct work_struct stats_work; }; static struct device *mtk_star_get_dev(struct mtk_star_priv *priv) @@ -432,42 +430,6 @@ static void mtk_star_intr_disable(struct mtk_star_priv *priv) regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0); } -static void mtk_star_intr_enable_tx(struct mtk_star_priv *priv) -{ - regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK, - MTK_STAR_BIT_INT_STS_TNTC); -} - -static void mtk_star_intr_enable_rx(struct mtk_star_priv *priv) -{ - regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK, - MTK_STAR_BIT_INT_STS_FNRC); -} - -static void mtk_star_intr_enable_stats(struct mtk_star_priv *priv) -{ - regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK, - MTK_STAR_REG_INT_STS_MIB_CNT_TH); -} - -static void mtk_star_intr_disable_tx(struct mtk_star_priv *priv) -{ - regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK, - MTK_STAR_BIT_INT_STS_TNTC); -} - -static void mtk_star_intr_disable_rx(struct mtk_star_priv *priv) -{ - regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK, - MTK_STAR_BIT_INT_STS_FNRC); -} - -static void mtk_star_intr_disable_stats(struct mtk_star_priv *priv) -{ - regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK, - MTK_STAR_REG_INT_STS_MIB_CNT_TH); -} - static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv) { unsigned int val; @@ -663,20 +625,6 @@ static void mtk_star_update_stats(struct mtk_star_priv *priv) stats->rx_errors += stats->rx_fifo_errors; } -/* This runs in process context and parallel TX and RX paths executing in - * napi context may result in losing some stats data but this should happen - * seldom enough to be acceptable. 
- */ -static void mtk_star_update_stats_work(struct work_struct *work) -{ - struct mtk_star_priv *priv = container_of(work, struct mtk_star_priv, - stats_work); - - mtk_star_update_stats(priv); - mtk_star_reset_counters(priv); - mtk_star_intr_enable_stats(priv); -} - static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev) { uintptr_t tail, offset; @@ -767,42 +715,25 @@ static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv) mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx); } -/* All processing for TX and RX happens in the napi poll callback. */ +/* All processing for TX and RX happens in the napi poll callback. + * + * FIXME: The interrupt handling should be more fine-grained with each + * interrupt enabled/disabled independently when needed. Unfortunatly this + * turned out to impact the driver's stability and until we have something + * working properly, we're disabling all interrupts during TX & RX processing + * or when resetting the counter registers. + */ static irqreturn_t mtk_star_handle_irq(int irq, void *data) { struct mtk_star_priv *priv; struct net_device *ndev; - bool need_napi = false; - unsigned int status; ndev = data; priv = netdev_priv(ndev); if (netif_running(ndev)) { - status = mtk_star_intr_read(priv); - - if (status & MTK_STAR_BIT_INT_STS_TNTC) { - mtk_star_intr_disable_tx(priv); - need_napi = true; - } - - if (status & MTK_STAR_BIT_INT_STS_FNRC) { - mtk_star_intr_disable_rx(priv); - need_napi = true; - } - - if (need_napi) - napi_schedule(&priv->napi); - - /* One of the counters reached 0x8000000 - update stats and - * reset all counters. - */ - if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) { - mtk_star_intr_disable_stats(priv); - schedule_work(&priv->stats_work); - } - - mtk_star_intr_ack_all(priv); + mtk_star_intr_disable(priv); + napi_schedule(&priv->napi); } return IRQ_HANDLED; @@ -1169,8 +1100,6 @@ static void mtk_star_tx_complete_all(struct mtk_star_priv *priv) if (wake && netif_queue_stopped(ndev)) netif_wake_queue(ndev); - mtk_star_intr_enable_tx(priv); - spin_unlock(&priv->lock); } @@ -1332,20 +1261,32 @@ static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget) static int mtk_star_poll(struct napi_struct *napi, int budget) { struct mtk_star_priv *priv; + unsigned int status; int received = 0; priv = container_of(napi, struct mtk_star_priv, napi); - /* Clean-up all TX descriptors. */ - mtk_star_tx_complete_all(priv); - /* Receive up to $budget packets. */ - received = mtk_star_process_rx(priv, budget); + status = mtk_star_intr_read(priv); + mtk_star_intr_ack_all(priv); - if (received < budget) { - napi_complete_done(napi, received); - mtk_star_intr_enable_rx(priv); + if (status & MTK_STAR_BIT_INT_STS_TNTC) + /* Clean-up all TX descriptors. */ + mtk_star_tx_complete_all(priv); + + if (status & MTK_STAR_BIT_INT_STS_FNRC) + /* Receive up to $budget packets. 
*/ + received = mtk_star_process_rx(priv, budget); + + if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) { + mtk_star_update_stats(priv); + mtk_star_reset_counters(priv); } + if (received < budget) + napi_complete_done(napi, received); + + mtk_star_intr_enable(priv); + return received; } @@ -1532,7 +1473,6 @@ static int mtk_star_probe(struct platform_device *pdev) ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE; spin_lock_init(&priv->lock); - INIT_WORK(&priv->stats_work, mtk_star_update_stats_work); base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index 80713123de5c..eefeb1cdc2ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -407,7 +407,9 @@ static int mlx5e_rep_indr_setup_block(struct net_device *netdev, struct mlx5e_rep_priv *rpriv, struct flow_block_offload *f, - flow_setup_cb_t *setup_cb) + flow_setup_cb_t *setup_cb, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) { struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); struct mlx5e_rep_indr_block_priv *indr_priv; @@ -438,8 +440,10 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, list_add(&indr_priv->list, &rpriv->uplink_priv.tc_indr_block_priv_list); - block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv, - mlx5e_rep_indr_block_unbind); + block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv, + mlx5e_rep_indr_block_unbind, + f, netdev, data, rpriv, + cleanup); if (IS_ERR(block_cb)) { list_del(&indr_priv->list); kfree(indr_priv); @@ -458,7 +462,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, if (!block_cb) return -ENOENT; - flow_block_cb_remove(block_cb, f); + flow_indr_block_cb_remove(block_cb, f); list_del(&block_cb->driver_list); return 0; default: @@ -469,15 +473,19 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, static int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv, - enum tc_setup_type type, void *type_data) + enum tc_setup_type type, void *type_data, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) { switch (type) { case TC_SETUP_BLOCK: return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data, - mlx5e_rep_indr_setup_tc_cb); + mlx5e_rep_indr_setup_tc_cb, + data, cleanup); case TC_SETUP_FT: return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data, - mlx5e_rep_indr_setup_ft_cb); + mlx5e_rep_indr_setup_ft_cb, + data, cleanup); default: return -EOPNOTSUPP; } @@ -496,7 +504,7 @@ int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv) void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv) { flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv, - mlx5e_rep_indr_setup_tc_cb); + mlx5e_rep_indr_block_unbind); } #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 5ffa32b75e5f..029ea344ad65 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -978,8 +978,10 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, lossy = !(pfc || pause_en); thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); + thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells); delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, pause_en); + delay_cells = 
mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells); total_cells = thres_cells + delay_cells; taken_headroom_cells += total_cells; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 6f96ca50c9ba..3abe3e7d89bc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -374,6 +374,17 @@ mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port, return NULL; } +static inline u32 +mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port, + u32 size_cells) +{ + /* Ports with eight lanes use two headroom buffers between which the + * configured headroom size is split. Therefore, multiply the calculated + * headroom size by two. + */ + return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells; +} + enum mlxsw_sp_flood_type { MLXSW_SP_FLOOD_TYPE_UC, MLXSW_SP_FLOOD_TYPE_BC, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 21bfb2f6a6f0..6f84557a5a6f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -312,6 +312,7 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) if (i == MLXSW_SP_PB_UNUSED) continue; + size = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size); mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size); } mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 304eb8c3d8bd..92351a79addc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -782,6 +782,7 @@ mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) speed = 0; buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu); + buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize); mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); } diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index c5c5c688b7e2..f1711ac86d0c 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -3091,6 +3091,8 @@ static const struct pci_device_id lan743x_pcidev_tbl[] = { { 0, } }; +MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl); + static struct pci_driver lan743x_pcidev_driver = { .name = DRIVER_NAME, .id_table = lan743x_pcidev_tbl, diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h index 628fa9b2f741..373165119850 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h @@ -297,7 +297,7 @@ struct vxge_hw_fifo_config { * @greedy_return: If Set it forces the device to return absolutely all RxD * that are consumed and still on board when a timer interrupt * triggers. 
If Clear, then if the device has already returned - * RxD before current timer interrupt trigerred and after the + * RxD before current timer interrupt triggered and after the * previous timer interrupt triggered, then the device is not * forced to returned the rest of the consumed RxD that it has * on board which account for a byte count less than the one diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index c39327677a7d..bb448c82cdc2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -861,7 +861,7 @@ static void nfp_flower_clean(struct nfp_app *app) flush_work(&app_priv->cmsg_work); flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app, - nfp_flower_setup_indr_block_cb); + nfp_flower_setup_indr_tc_release); if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM) nfp_flower_qos_cleanup(app); diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 6c3dc3baf387..7f54a620acad 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -459,9 +459,10 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev, struct tc_cls_matchall_offload *flow); void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb); int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, - enum tc_setup_type type, void *type_data); -int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv); + enum tc_setup_type type, void *type_data, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)); +void nfp_flower_setup_indr_tc_release(void *cb_priv); void __nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 695d24b9dd92..d7340dc09b4c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1619,8 +1619,8 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app, return NULL; } -int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, - void *type_data, void *cb_priv) +static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) { struct nfp_flower_indr_block_cb_priv *priv = cb_priv; struct flow_cls_offload *flower = type_data; @@ -1637,7 +1637,7 @@ int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, } } -static void nfp_flower_setup_indr_tc_release(void *cb_priv) +void nfp_flower_setup_indr_tc_release(void *cb_priv) { struct nfp_flower_indr_block_cb_priv *priv = cb_priv; @@ -1647,7 +1647,8 @@ static void nfp_flower_setup_indr_tc_release(void *cb_priv) static int nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, - struct flow_block_offload *f) + struct flow_block_offload *f, void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) { struct nfp_flower_indr_block_cb_priv *cb_priv; struct nfp_flower_priv *priv = app->priv; @@ -1676,9 +1677,10 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, cb_priv->app = app; list_add(&cb_priv->list, &priv->indr_block_cb_priv); - block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb, - cb_priv, cb_priv, - nfp_flower_setup_indr_tc_release); + block_cb = 
flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb, + cb_priv, cb_priv, + nfp_flower_setup_indr_tc_release, + f, netdev, data, app, cleanup); if (IS_ERR(block_cb)) { list_del(&cb_priv->list); kfree(cb_priv); @@ -1699,7 +1701,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, if (!block_cb) return -ENOENT; - flow_block_cb_remove(block_cb, f); + flow_indr_block_cb_remove(block_cb, f); list_del(&block_cb->driver_list); return 0; default: @@ -1710,7 +1712,9 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, - enum tc_setup_type type, void *type_data) + enum tc_setup_type type, void *type_data, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) { if (!nfp_fl_is_netdev_to_offload(netdev)) return -EOPNOTSUPP; @@ -1718,7 +1722,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, switch (type) { case TC_SETUP_BLOCK: return nfp_flower_setup_indr_tc_block(netdev, cb_priv, - type_data); + type_data, data, cleanup); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 32b9d7705404..55cef5b16aa5 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -147,7 +147,7 @@ struct pch_gbe_regs { #define PCH_GBE_RH_ALM_FULL_8 0x00001000 /* 8 words */ #define PCH_GBE_RH_ALM_FULL_16 0x00002000 /* 16 words */ #define PCH_GBE_RH_ALM_FULL_32 0x00003000 /* 32 words */ -/* RX FIFO Read Triger Threshold */ +/* RX FIFO Read Trigger Threshold */ #define PCH_GBE_RH_RD_TRG_4 0x00000000 /* 4 words */ #define PCH_GBE_RH_RD_TRG_8 0x00000200 /* 8 words */ #define PCH_GBE_RH_RD_TRG_16 0x00000400 /* 16 words */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 9d8c969f21cb..aaa00edd9d5b 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -96,7 +96,8 @@ static void ionic_link_status_check(struct ionic_lif *lif) u16 link_status; bool link_up; - if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) + if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) || + test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state)) return; link_status = le16_to_cpu(lif->info->status.link_status); @@ -1245,6 +1246,7 @@ static int ionic_init_nic_features(struct ionic_lif *lif) netdev->hw_features |= netdev->hw_enc_features; netdev->features |= netdev->hw_features; + netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES; netdev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; @@ -1692,15 +1694,15 @@ static void ionic_stop_queues(struct ionic_lif *lif) if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state)) return; - ionic_txrx_disable(lif); netif_tx_disable(lif->netdev); + ionic_txrx_disable(lif); } int ionic_stop(struct net_device *netdev) { struct ionic_lif *lif = netdev_priv(netdev); - if (!netif_device_present(netdev)) + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) return 0; ionic_stop_queues(lif); @@ -1983,18 +1985,19 @@ int ionic_reset_queues(struct ionic_lif *lif) bool running; int err = 0; - /* Put off the next watchdog timeout */ - netif_trans_update(lif->netdev); - err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); if (err) return err; running = netif_running(lif->netdev); - if (running) + if (running) { + netif_device_detach(lif->netdev); err = 
ionic_stop(lif->netdev); - if (!err && running) + } + if (!err && running) { ionic_open(lif->netdev); + netif_device_attach(lif->netdev); + } clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state); diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 7b76667acaba..08ba9d54ab63 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -271,7 +271,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; } - iids->vf_cids += vf_cids * p_mngr->vf_count; + iids->vf_cids = vf_cids; iids->tids += vf_tids * p_mngr->vf_count; DP_VERBOSE(p_hwfn, QED_MSG_ILT, @@ -465,6 +465,20 @@ static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk) return p_blk; } +static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; + u32 cli_idx, blk_idx; + + for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) { + for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++) + clients[cli_idx].pf_blks[blk_idx].total_size = 0; + + for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++) + clients[cli_idx].vf_blks[blk_idx].total_size = 0; + } +} + int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; @@ -484,6 +498,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count) p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT); + /* Reset all ILT blocks at the beginning of ILT computing in order + * to prevent memory allocation for irrelevant blocks afterwards. + */ + qed_cxt_ilt_blk_reset(p_hwfn); + DP_VERBOSE(p_hwfn, QED_MSG_ILT, "hwfn [%d] - Set context manager starting line to be 0x%08x\n", p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line); diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 57a0dab88431..81e8fbe4a05b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -5568,7 +5568,8 @@ static const char * const s_status_str[] = { /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */ "The filter/trigger constraint dword offsets are not enabled for recording", - + /* DBG_STATUS_NO_MATCHING_FRAMING_MODE */ + "No matching framing mode", /* DBG_STATUS_VFC_READ_ERROR */ "Error reading from VFC", diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 1eebf30fa798..3aa51374e727 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -980,7 +980,7 @@ int qed_llh_add_mac_filter(struct qed_dev *cdev, struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); union qed_llh_filter filter = {}; - u8 filter_idx, abs_ppfid; + u8 filter_idx, abs_ppfid = 0; u32 high, low, ref_cnt; int rc = 0; @@ -1368,6 +1368,8 @@ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn) void qed_resc_free(struct qed_dev *cdev) { + struct qed_rdma_info *rdma_info; + struct qed_hwfn *p_hwfn; int i; if (IS_VF(cdev)) { @@ -1385,7 +1387,8 @@ void qed_resc_free(struct qed_dev *cdev) qed_llh_free(cdev); for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + p_hwfn = cdev->hwfns + i; + rdma_info = p_hwfn->p_rdma_info; qed_cxt_mngr_free(p_hwfn); qed_qm_info_free(p_hwfn); @@ -1404,8 +1407,10 @@ void qed_resc_free(struct qed_dev *cdev) qed_ooo_free(p_hwfn); } - if 
(QED_IS_RDMA_PERSONALITY(p_hwfn)) + if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) { + qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto); qed_rdma_info_free(p_hwfn); + } qed_iov_free(p_hwfn); qed_l2_free(p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index d2fe61a5cf56..5409a2da6106 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -2836,8 +2836,6 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn) if (rc) return rc; - qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP); - return qed_iwarp_ll2_stop(p_hwfn); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 4566815f7b87..7271dd7166e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -113,7 +113,6 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn) break; } } - qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE); } static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 856051f50eb7..adc2c8f3d48e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status) mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); } +#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90 +#define QED_VF_CHANNEL_USLEEP_DELAY 100 +#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10 +#define QED_VF_CHANNEL_MSLEEP_DELAY 25 + static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) { union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; struct ustorm_trigger_vf_zone trigger; struct ustorm_vf_zone *zone_data; - int rc = 0, time = 100; + int iter, rc = 0; zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B; @@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger)); /* When PF would be done with the response, it would write back to the - * `done' address. Poll until then. + * `done' address from a coherent DMA zone. Poll until then. 
*/ - while ((!*done) && time) { - msleep(25); - time--; + + iter = QED_VF_CHANNEL_USLEEP_ITERATIONS; + while (!*done && iter--) { + udelay(QED_VF_CHANNEL_USLEEP_DELAY); + dma_rmb(); + } + + iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS; + while (!*done && iter--) { + msleep(QED_VF_CHANNEL_MSLEEP_DELAY); + dma_rmb(); } if (!*done) { diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 756c05eb96f3..29e285430f99 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1229,7 +1229,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, /* PTP not supported on VFs */ if (!is_vf) - qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL)); + qede_ptp_enable(edev); edev->ops->register_ops(cdev, &qede_ll_ops, edev); @@ -1318,6 +1318,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) if (system_state == SYSTEM_POWER_OFF) return; qed_ops->common->remove(cdev); + edev->cdev = NULL; /* Since this can happen out-of-sync with other flows, * don't release the netdevice until after slowpath stop diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 4c7f7a7fc151..cd5841a9415e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -412,6 +412,7 @@ void qede_ptp_disable(struct qede_dev *edev) if (ptp->tx_skb) { dev_kfree_skb_any(ptp->tx_skb); ptp->tx_skb = NULL; + clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags); } /* Disable PTP in HW */ @@ -423,7 +424,7 @@ void qede_ptp_disable(struct qede_dev *edev) edev->ptp = NULL; } -static int qede_ptp_init(struct qede_dev *edev, bool init_tc) +static int qede_ptp_init(struct qede_dev *edev) { struct qede_ptp *ptp; int rc; @@ -444,25 +445,19 @@ static int qede_ptp_init(struct qede_dev *edev, bool init_tc) /* Init work queue for Tx timestamping */ INIT_WORK(&ptp->work, qede_ptp_task); - /* Init cyclecounter and timecounter. This is done only in the first - * load. If done in every load, PTP application will fail when doing - * unload / load (e.g. MTU change) while it is running. 
- */ - if (init_tc) { - memset(&ptp->cc, 0, sizeof(ptp->cc)); - ptp->cc.read = qede_ptp_read_cc; - ptp->cc.mask = CYCLECOUNTER_MASK(64); - ptp->cc.shift = 0; - ptp->cc.mult = 1; + /* Init cyclecounter and timecounter */ + memset(&ptp->cc, 0, sizeof(ptp->cc)); + ptp->cc.read = qede_ptp_read_cc; + ptp->cc.mask = CYCLECOUNTER_MASK(64); + ptp->cc.shift = 0; + ptp->cc.mult = 1; - timecounter_init(&ptp->tc, &ptp->cc, - ktime_to_ns(ktime_get_real())); - } + timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real())); - return rc; + return 0; } -int qede_ptp_enable(struct qede_dev *edev, bool init_tc) +int qede_ptp_enable(struct qede_dev *edev) { struct qede_ptp *ptp; int rc; @@ -483,7 +478,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc) edev->ptp = ptp; - rc = qede_ptp_init(edev, init_tc); + rc = qede_ptp_init(edev); if (rc) goto err1; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h index 691a14c4b2c5..89c7f3cf3ee2 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h @@ -41,7 +41,7 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb); void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb); int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req); void qede_ptp_disable(struct qede_dev *edev); -int qede_ptp_enable(struct qede_dev *edev, bool init_tc); +int qede_ptp_enable(struct qede_dev *edev); int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts); static inline void qede_ptp_record_rx_ts(struct qede_dev *edev, diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 2d873ae8a234..668ccc9d49f8 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -105,6 +105,7 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev) qede_rdma_cleanup_event(edev); destroy_workqueue(edev->rdma_info.rdma_wq); + edev->rdma_info.rdma_wq = NULL; } int qede_rdma_dev_add(struct qede_dev *edev, bool recovery) @@ -325,7 +326,7 @@ static void qede_rdma_add_event(struct qede_dev *edev, if (edev->rdma_info.exp_recovery) return; - if (!edev->rdma_info.qedr_dev) + if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq) return; /* We don't want the cleanup flow to start while we're allocating and diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 8d7b9bb910f2..10037639ac2c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -269,7 +269,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); int ret; @@ -286,7 +286,7 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); int ret; @@ -315,7 +315,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct 
device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); u64 data; int ret; @@ -337,7 +337,7 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); u64 data; int ret; @@ -402,7 +402,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_pm_func_cfg *pm_cfg; u32 id, action, pci_func; @@ -452,7 +452,7 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_pm_func_cfg *pm_cfg; u8 pci_func; @@ -545,7 +545,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_esw_func_cfg *esw_cfg; struct qlcnic_npar_info *npar; @@ -629,7 +629,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_esw_func_cfg *esw_cfg; u8 pci_func; @@ -681,7 +681,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_info nic_info; struct qlcnic_npar_func_cfg *np_cfg; @@ -728,7 +728,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_npar_func_cfg *np_cfg; struct qlcnic_info nic_info; @@ -775,7 +775,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_esw_statistics port_stats; int ret; @@ -810,7 +810,7 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_esw_statistics esw_stats; int ret; @@ -845,7 +845,7 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); int ret; @@ -875,7 +875,7 @@ static ssize_t 
qlcnic_sysfs_clear_port_stats(struct file *file, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); int ret; @@ -904,7 +904,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); struct qlcnic_pci_func_cfg *pci_cfg; struct qlcnic_pci_info *pci_info; @@ -946,7 +946,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp, { unsigned char *p_read_buf; int ret, count; - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); if (!size) @@ -1124,7 +1124,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp, int ret; static int flash_mode; unsigned long data; - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); ret = kstrtoul(buf, 16, &data); diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index dad84ecf5a77..b660ddbe4025 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -2114,8 +2114,11 @@ static void rtl_release_firmware(struct rtl8169_private *tp) void r8169_apply_firmware(struct rtl8169_private *tp) { /* TODO: release firmware if rtl_fw_write_firmware signals failure. */ - if (tp->rtl_fw) + if (tp->rtl_fw) { rtl_fw_write_firmware(tp, tp->rtl_fw); + /* At least one firmware doesn't reset tp->ocp_base. 
*/ + tp->ocp_base = OCP_STD_PHY_BASE; + } } static void rtl8168_config_eee_mac(struct rtl8169_private *tp) diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 7585cd2270ba..fc99e7118e49 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -647,10 +647,10 @@ static int rocker_dma_rings_init(struct rocker *rocker) err_dma_event_ring_bufs_alloc: rocker_dma_ring_destroy(rocker, &rocker->event_ring); err_dma_event_ring_create: + rocker_dma_cmd_ring_waits_free(rocker); +err_dma_cmd_ring_waits_alloc: rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, PCI_DMA_BIDIRECTIONAL); -err_dma_cmd_ring_waits_alloc: - rocker_dma_cmd_ring_waits_free(rocker); err_dma_cmd_ring_bufs_alloc: rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); return err; diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 328bc38848bb..0f366cc50b74 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1044,8 +1044,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) skb->ip_summed = CHECKSUM_UNNECESSARY; next: - if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) || - xdp_result) { + if (skb) + napi_gro_receive(&priv->napi, skb); + if (skb || xdp_result) { ndev->stats.rx_packets++; ndev->stats.rx_bytes += xdp.data_end - xdp.data; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index fbaf3c987d9c..f34c7903ff52 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -186,7 +186,7 @@ #define XAE_RAF_TXVSTRPMODE_MASK 0x00000180 /* Tx VLAN STRIP mode */ #define XAE_RAF_RXVSTRPMODE_MASK 0x00000600 /* Rx VLAN STRIP mode */ #define XAE_RAF_NEWFNCENBL_MASK 0x00000800 /* New function mode */ -/* Exteneded Multicast Filtering mode */ +/* Extended Multicast Filtering mode */ #define XAE_RAF_EMULTIFLTRENBL_MASK 0x00001000 #define XAE_RAF_STATSRST_MASK 0x00002000 /* Stats. 
Counter Reset */ #define XAE_RAF_RXBADFRMEN_MASK 0x00004000 /* Recv Bad Frame Enable */ diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 75266580b586..4661ef865807 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1649,6 +1649,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], geneve->collect_md = metadata; geneve->use_udp6_rx_checksums = use_udp6_rx_checksums; geneve->ttl_inherit = ttl_inherit; + geneve->df = df; geneve_unquiesce(geneve, gs4, gs6); return 0; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index f25702386d83..e351d65533aa 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -480,8 +480,7 @@ config MICROCHIP_T1_PHY config MICROSEMI_PHY tristate "Microsemi PHYs" depends on MACSEC || MACSEC=n - select CRYPTO_AES - select CRYPTO_ECB + select CRYPTO_LIB_AES if MACSEC help Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c index b4d3dc4068e2..d53ca884b5c9 100644 --- a/drivers/net/phy/mscc/mscc_macsec.c +++ b/drivers/net/phy/mscc/mscc_macsec.c @@ -10,7 +10,7 @@ #include #include -#include +#include #include @@ -500,39 +500,17 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow) static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN], u16 key_len, u8 hkey[16]) { - struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); - struct skcipher_request *req = NULL; - struct scatterlist src, dst; - DECLARE_CRYPTO_WAIT(wait); - u32 input[4] = {0}; + const u8 input[AES_BLOCK_SIZE] = {0}; + struct crypto_aes_ctx ctx; int ret; - if (IS_ERR(tfm)) - return PTR_ERR(tfm); + ret = aes_expandkey(&ctx, key, key_len); + if (ret) + return ret; - req = skcipher_request_alloc(tfm, GFP_KERNEL); - if (!req) { - ret = -ENOMEM; - goto out; - } - - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, - &wait); - ret = crypto_skcipher_setkey(tfm, key, key_len); - if (ret < 0) - goto out; - - sg_init_one(&src, input, 16); - sg_init_one(&dst, hkey, 16); - skcipher_request_set_crypt(req, &src, &dst, 16, NULL); - - ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); - -out: - skcipher_request_free(req); - crypto_free_skcipher(tfm); - return ret; + aes_encrypt(&ctx, hkey, input); + memzero_explicit(&ctx, sizeof(ctx)); + return 0; } static int vsc8584_macsec_transformation(struct phy_device *phydev, diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1de3938628f4..56cfae950472 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -840,7 +840,7 @@ static void phy_error(struct phy_device *phydev) * phy_disable_interrupts - Disable the PHY interrupts from the PHY side * @phydev: target phy_device struct */ -static int phy_disable_interrupts(struct phy_device *phydev) +int phy_disable_interrupts(struct phy_device *phydev) { int err; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 04946de74fa0..b4978c5fb2ca 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -794,8 +794,10 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id, /* Grab the bits from PHYIR2, and put them in the lower half */ phy_reg = mdiobus_read(bus, addr, MII_PHYSID2); - if (phy_reg < 0) - return -EIO; + if (phy_reg < 0) { + /* returning -ENODEV doesn't stop bus scanning */ + return (phy_reg == -EIO || phy_reg == -ENODEV) ? 
-ENODEV : -EIO; + } *phy_id |= phy_reg; @@ -1090,6 +1092,10 @@ int phy_init_hw(struct phy_device *phydev) if (ret < 0) return ret; + ret = phy_disable_interrupts(phydev); + if (ret) + return ret; + if (phydev->drv->config_init) ret = phydev->drv->config_init(phydev); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 0ab65fb75258..3b7c70e6c5dd 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1463,6 +1463,8 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, struct ethtool_pauseparam *pause) { struct phylink_link_state *config = &pl->link_config; + bool manual_changed; + int pause_state; ASSERT_RTNL(); @@ -1477,15 +1479,15 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, !pause->autoneg && pause->rx_pause != pause->tx_pause) return -EINVAL; - mutex_lock(&pl->state_mutex); - config->pause = 0; + pause_state = 0; if (pause->autoneg) - config->pause |= MLO_PAUSE_AN; + pause_state |= MLO_PAUSE_AN; if (pause->rx_pause) - config->pause |= MLO_PAUSE_RX; + pause_state |= MLO_PAUSE_RX; if (pause->tx_pause) - config->pause |= MLO_PAUSE_TX; + pause_state |= MLO_PAUSE_TX; + mutex_lock(&pl->state_mutex); /* * See the comments for linkmode_set_pause(), wrt the deficiencies * with the current implementation. A solution to this issue would @@ -1502,18 +1504,35 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, linkmode_set_pause(config->advertising, pause->tx_pause, pause->rx_pause); - /* If we have a PHY, phylib will call our link state function if the - * mode has changed, which will trigger a resolve and update the MAC - * configuration. + manual_changed = (config->pause ^ pause_state) & MLO_PAUSE_AN || + (!(pause_state & MLO_PAUSE_AN) && + (config->pause ^ pause_state) & MLO_PAUSE_TXRX_MASK); + + config->pause = pause_state; + + if (!pl->phydev && !test_bit(PHYLINK_DISABLE_STOPPED, + &pl->phylink_disable_state)) + phylink_pcs_config(pl, true, &pl->link_config); + + mutex_unlock(&pl->state_mutex); + + /* If we have a PHY, a change of the pause frame advertisement will + * cause phylib to renegotiate (if AN is enabled) which will in turn + * call our phylink_phy_change() and trigger a resolve. Note that + * we can't hold our state mutex while calling phy_set_asym_pause(). */ - if (pl->phydev) { + if (pl->phydev) phy_set_asym_pause(pl->phydev, pause->rx_pause, pause->tx_pause); - } else if (!test_bit(PHYLINK_DISABLE_STOPPED, - &pl->phylink_disable_state)) { - phylink_pcs_config(pl, true, &pl->link_config); + + /* If the manual pause settings changed, make sure we trigger a + * resolve to update their state; we can not guarantee that the + * link will cycle. + */ + if (manual_changed) { + pl->mac_link_dropped = true; + phylink_run_resolve(pl); } - mutex_unlock(&pl->state_mutex); return 0; } diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 93da7d3d0954..74568ae16125 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -122,10 +122,13 @@ static int lan87xx_read_status(struct phy_device *phydev) if (rc < 0) return rc; - /* Wait max 640 ms to detect energy */ - phy_read_poll_timeout(phydev, MII_LAN83C185_CTRL_STATUS, rc, - rc & MII_LAN83C185_ENERGYON, 10000, - 640000, true); + /* Wait max 640 ms to detect energy and the timeout is not + * an actual error. 
+ */ + read_poll_timeout(phy_read, rc, + rc & MII_LAN83C185_ENERGYON || rc < 0, + 10000, 640000, true, phydev, + MII_LAN83C185_CTRL_STATUS); if (rc < 0) return rc; diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 950711448f39..a38e868e44d4 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1491,10 +1491,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) } if (pkt_cnt == 0) { - /* Skip IP alignment psudo header */ - skb_pull(skb, 2); skb->len = pkt_len; - skb_set_tail_pointer(skb, pkt_len); + /* Skip IP alignment pseudo header */ + skb_pull(skb, 2); + skb_set_tail_pointer(skb, skb->len); skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(skb, pkt_hdr); return 1; @@ -1503,8 +1503,9 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) ax_skb = skb_clone(skb, GFP_ATOMIC); if (ax_skb) { ax_skb->len = pkt_len; - ax_skb->data = skb->data + 2; - skb_set_tail_pointer(ax_skb, pkt_len); + /* Skip IP alignment pseudo header */ + skb_pull(ax_skb, 2); + skb_set_tail_pointer(ax_skb, ax_skb->len); ax_skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(ax_skb, pkt_hdr); usbnet_skb_return(dev, ax_skb); diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 355be77f4241..3cf4dc3433f9 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1324,7 +1324,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); if (pdata) { - cancel_delayed_work(&pdata->carrier_check); + cancel_delayed_work_sync(&pdata->carrier_check); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); pdata = NULL; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index e8085ab6d484..89d85dcb200e 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1380,6 +1380,8 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct vxlan_rdst *rd; if (rcu_access_pointer(f->nh)) { + if (*idx < cb->args[2]) + goto skip_nh; err = vxlan_fdb_info(skb, vxlan, f, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@ -1387,6 +1389,8 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, NLM_F_MULTI, NULL); if (err < 0) goto out; +skip_nh: + *idx += 1; continue; } diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index 3ac3f8570ca1..a8f151b1b5fa 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -45,17 +45,18 @@ static int wg_open(struct net_device *dev) if (dev_v6) dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE; + mutex_lock(&wg->device_update_lock); ret = wg_socket_init(wg, wg->incoming_port); if (ret < 0) - return ret; - mutex_lock(&wg->device_update_lock); + goto out; list_for_each_entry(peer, &wg->peer_list, peer_list) { wg_packet_send_staged_packets(peer); if (peer->persistent_keepalive_interval) wg_packet_send_keepalive(peer); } +out: mutex_unlock(&wg->device_update_lock); - return 0; + return ret; } #ifdef CONFIG_PM_SLEEP @@ -225,6 +226,7 @@ static void wg_destruct(struct net_device *dev) list_del(&wg->device_list); rtnl_unlock(); mutex_lock(&wg->device_update_lock); + rcu_assign_pointer(wg->creating_net, NULL); wg->incoming_port = 0; wg_socket_reinit(wg, NULL, NULL); /* The final references are cleared in the below calls to destroy_workqueue. 
*/ @@ -240,13 +242,11 @@ static void wg_destruct(struct net_device *dev) skb_queue_purge(&wg->incoming_handshakes); free_percpu(dev->tstats); free_percpu(wg->incoming_handshakes_worker); - if (wg->have_creating_net_ref) - put_net(wg->creating_net); kvfree(wg->index_hashtable); kvfree(wg->peer_hashtable); mutex_unlock(&wg->device_update_lock); - pr_debug("%s: Interface deleted\n", dev->name); + pr_debug("%s: Interface destroyed\n", dev->name); free_netdev(dev); } @@ -292,7 +292,7 @@ static int wg_newlink(struct net *src_net, struct net_device *dev, struct wg_device *wg = netdev_priv(dev); int ret = -ENOMEM; - wg->creating_net = src_net; + rcu_assign_pointer(wg->creating_net, src_net); init_rwsem(&wg->static_identity.lock); mutex_init(&wg->socket_update_lock); mutex_init(&wg->device_update_lock); @@ -393,30 +393,26 @@ static struct rtnl_link_ops link_ops __read_mostly = { .newlink = wg_newlink, }; -static int wg_netdevice_notification(struct notifier_block *nb, - unsigned long action, void *data) +static void wg_netns_pre_exit(struct net *net) { - struct net_device *dev = ((struct netdev_notifier_info *)data)->dev; - struct wg_device *wg = netdev_priv(dev); + struct wg_device *wg; - ASSERT_RTNL(); - - if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops) - return 0; - - if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) { - put_net(wg->creating_net); - wg->have_creating_net_ref = false; - } else if (dev_net(dev) != wg->creating_net && - !wg->have_creating_net_ref) { - wg->have_creating_net_ref = true; - get_net(wg->creating_net); + rtnl_lock(); + list_for_each_entry(wg, &device_list, device_list) { + if (rcu_access_pointer(wg->creating_net) == net) { + pr_debug("%s: Creating namespace exiting\n", wg->dev->name); + netif_carrier_off(wg->dev); + mutex_lock(&wg->device_update_lock); + rcu_assign_pointer(wg->creating_net, NULL); + wg_socket_reinit(wg, NULL, NULL); + mutex_unlock(&wg->device_update_lock); + } } - return 0; + rtnl_unlock(); } -static struct notifier_block netdevice_notifier = { - .notifier_call = wg_netdevice_notification +static struct pernet_operations pernet_ops = { + .pre_exit = wg_netns_pre_exit }; int __init wg_device_init(void) @@ -429,18 +425,18 @@ int __init wg_device_init(void) return ret; #endif - ret = register_netdevice_notifier(&netdevice_notifier); + ret = register_pernet_device(&pernet_ops); if (ret) goto error_pm; ret = rtnl_link_register(&link_ops); if (ret) - goto error_netdevice; + goto error_pernet; return 0; -error_netdevice: - unregister_netdevice_notifier(&netdevice_notifier); +error_pernet: + unregister_pernet_device(&pernet_ops); error_pm: #ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&pm_notifier); @@ -451,7 +447,7 @@ int __init wg_device_init(void) void wg_device_uninit(void) { rtnl_link_unregister(&link_ops); - unregister_netdevice_notifier(&netdevice_notifier); + unregister_pernet_device(&pernet_ops); #ifdef CONFIG_PM_SLEEP unregister_pm_notifier(&pm_notifier); #endif diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h index b15a8be9d816..4d0144e16947 100644 --- a/drivers/net/wireguard/device.h +++ b/drivers/net/wireguard/device.h @@ -40,7 +40,7 @@ struct wg_device { struct net_device *dev; struct crypt_queue encrypt_queue, decrypt_queue; struct sock __rcu *sock4, *sock6; - struct net *creating_net; + struct net __rcu *creating_net; struct noise_static_identity static_identity; struct workqueue_struct *handshake_receive_wq, *handshake_send_wq; struct workqueue_struct *packet_crypt_wq; @@ -56,7 +56,6 
@@ struct wg_device { unsigned int num_peers, device_update_gen; u32 fwmark; u16 incoming_port; - bool have_creating_net_ref; }; int wg_device_init(void); diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c index 802099c8828a..20a4f3c0a0a1 100644 --- a/drivers/net/wireguard/netlink.c +++ b/drivers/net/wireguard/netlink.c @@ -511,11 +511,15 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info) if (flags & ~__WGDEVICE_F_ALL) goto out; - ret = -EPERM; - if ((info->attrs[WGDEVICE_A_LISTEN_PORT] || - info->attrs[WGDEVICE_A_FWMARK]) && - !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN)) - goto out; + if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) { + struct net *net; + rcu_read_lock(); + net = rcu_dereference(wg->creating_net); + ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? -EPERM : 0; + rcu_read_unlock(); + if (ret) + goto out; + } ++wg->device_update_gen; diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c index 626433690abb..201a22681945 100644 --- a/drivers/net/wireguard/noise.c +++ b/drivers/net/wireguard/noise.c @@ -617,8 +617,8 @@ wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src, memcpy(handshake->hash, hash, NOISE_HASH_LEN); memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); handshake->remote_index = src->sender_index; - if ((s64)(handshake->last_initiation_consumption - - (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0) + initiation_consumption = ktime_get_coarse_boottime_ns(); + if ((s64)(handshake->last_initiation_consumption - initiation_consumption) < 0) handshake->last_initiation_consumption = initiation_consumption; handshake->state = HANDSHAKE_CONSUMED_INITIATION; up_write(&handshake->lock); diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index 91438144e4f7..9b2ab6fc91cd 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -414,14 +414,8 @@ static void wg_packet_consume_data_done(struct wg_peer *peer, if (unlikely(routed_peer != peer)) goto dishonest_packet_peer; - if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) { - ++dev->stats.rx_dropped; - net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n", - dev->name, peer->internal_id, - &peer->endpoint.addr); - } else { - update_rx_stats(peer, message_data_len(len_before_trim)); - } + napi_gro_receive(&peer->napi, skb); + update_rx_stats(peer, message_data_len(len_before_trim)); return; dishonest_packet_peer: diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c index f9018027fc13..c33e2c81635f 100644 --- a/drivers/net/wireguard/socket.c +++ b/drivers/net/wireguard/socket.c @@ -347,6 +347,7 @@ static void set_sock_opts(struct socket *sock) int wg_socket_init(struct wg_device *wg, u16 port) { + struct net *net; int ret; struct udp_tunnel_sock_cfg cfg = { .sk_user_data = wg, @@ -371,37 +372,47 @@ int wg_socket_init(struct wg_device *wg, u16 port) }; #endif + rcu_read_lock(); + net = rcu_dereference(wg->creating_net); + net = net ? 
maybe_get_net(net) : NULL; + rcu_read_unlock(); + if (unlikely(!net)) + return -ENONET; + #if IS_ENABLED(CONFIG_IPV6) retry: #endif - ret = udp_sock_create(wg->creating_net, &port4, &new4); + ret = udp_sock_create(net, &port4, &new4); if (ret < 0) { pr_err("%s: Could not create IPv4 socket\n", wg->dev->name); - return ret; + goto out; } set_sock_opts(new4); - setup_udp_tunnel_sock(wg->creating_net, new4, &cfg); + setup_udp_tunnel_sock(net, new4, &cfg); #if IS_ENABLED(CONFIG_IPV6) if (ipv6_mod_enabled()) { port6.local_udp_port = inet_sk(new4->sk)->inet_sport; - ret = udp_sock_create(wg->creating_net, &port6, &new6); + ret = udp_sock_create(net, &port6, &new6); if (ret < 0) { udp_tunnel_sock_release(new4); if (ret == -EADDRINUSE && !port && retries++ < 100) goto retry; pr_err("%s: Could not create IPv6 socket\n", wg->dev->name); - return ret; + goto out; } set_sock_opts(new6); - setup_udp_tunnel_sock(wg->creating_net, new6, &cfg); + setup_udp_tunnel_sock(net, new6, &cfg); } #endif wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL); - return 0; + ret = 0; +out: + put_net(net); + return ret; } void wg_socket_reinit(struct wg_device *wg, struct sock *new4, diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index bc8c15fb609d..080e5aa60bea 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -897,7 +897,6 @@ static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb) void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid, struct wil_net_stats *stats, bool gro) { - gro_result_t rc = GRO_NORMAL; struct wil6210_vif *vif = ndev_to_vif(ndev); struct wil6210_priv *wil = ndev_to_wil(ndev); struct wireless_dev *wdev = vif_to_wdev(vif); @@ -908,22 +907,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid, */ int mcast = is_multicast_ether_addr(da); struct sk_buff *xmit_skb = NULL; - static const char * const gro_res_str[] = { - [GRO_MERGED] = "GRO_MERGED", - [GRO_MERGED_FREE] = "GRO_MERGED_FREE", - [GRO_HELD] = "GRO_HELD", - [GRO_NORMAL] = "GRO_NORMAL", - [GRO_DROP] = "GRO_DROP", - [GRO_CONSUMED] = "GRO_CONSUMED", - }; if (wdev->iftype == NL80211_IFTYPE_STATION) { sa = wil_skb_get_sa(skb); if (mcast && ether_addr_equal(sa, ndev->dev_addr)) { /* mcast packet looped back to us */ - rc = GRO_DROP; dev_kfree_skb(skb); - goto stats; + ndev->stats.rx_dropped++; + stats->rx_dropped++; + wil_dbg_txrx(wil, "Rx drop %d bytes\n", len); + return; } } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) { if (mcast) { @@ -967,26 +960,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid, wil_rx_handle_eapol(vif, skb); if (gro) - rc = napi_gro_receive(&wil->napi_rx, skb); + napi_gro_receive(&wil->napi_rx, skb); else netif_rx_ni(skb); - wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n", - len, gro_res_str[rc]); - } -stats: - /* statistics. 
rc set to GRO_NORMAL for AP bridging */ - if (unlikely(rc == GRO_DROP)) { - ndev->stats.rx_dropped++; - stats->rx_dropped++; - wil_dbg_txrx(wil, "Rx drop %d bytes\n", len); - } else { - ndev->stats.rx_packets++; - stats->rx_packets++; - ndev->stats.rx_bytes += len; - stats->rx_bytes += len; - if (mcast) - ndev->stats.multicast++; } + ndev->stats.rx_packets++; + stats->rx_packets++; + ndev->stats.rx_bytes += len; + stats->rx_bytes += len; + if (mcast) + ndev->stats.multicast++; } void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index ccbb5b43b8b2..4502f9c4708d 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -679,18 +679,8 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) return a->mode; } - if (a == &dev_attr_align.attr) { - int i; - - for (i = 0; i < nd_region->ndr_mappings; i++) { - struct nd_mapping *nd_mapping = &nd_region->mapping[i]; - struct nvdimm *nvdimm = nd_mapping->nvdimm; - - if (test_bit(NDD_LABELING, &nvdimm->flags)) - return a->mode; - } - return 0; - } + if (a == &dev_attr_align.attr) + return a->mode; if (a != &dev_attr_set_cookie.attr && a != &dev_attr_available_size.attr) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c2c5bc4fb702..28f4388c1337 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1974,7 +1974,6 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) if (ns->head->disk) { nvme_update_disk_info(ns->head->disk, ns, id); blk_queue_stack_limits(ns->head->disk->queue, ns->queue); - revalidate_disk(ns->head->disk); } #endif return 0; @@ -4174,6 +4173,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, ctrl->dev = dev; ctrl->ops = ops; ctrl->quirks = quirks; + ctrl->numa_node = NUMA_NO_NODE; INIT_WORK(&ctrl->scan_work, nvme_scan_work); INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index da78e499947a..18d084ed497e 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -409,15 +409,14 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) { struct nvme_ns_head *head = ns->head; - lockdep_assert_held(&ns->head->lock); - if (!head->disk) return; - if (!(head->disk->flags & GENHD_FL_UP)) + if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) device_add_disk(&head->subsys->dev, head->disk, nvme_ns_id_attr_groups); + mutex_lock(&head->lock); if (nvme_path_is_optimized(ns)) { int node, srcu_idx; @@ -426,9 +425,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) __nvme_find_path(head, node); srcu_read_unlock(&head->srcu, srcu_idx); } + mutex_unlock(&head->lock); - synchronize_srcu(&ns->head->srcu); - kblockd_schedule_work(&ns->head->requeue_work); + synchronize_srcu(&head->srcu); + kblockd_schedule_work(&head->requeue_work); } static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data, @@ -483,14 +483,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state) static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, struct nvme_ns *ns) { - mutex_lock(&ns->head->lock); ns->ana_grpid = le32_to_cpu(desc->grpid); ns->ana_state = desc->state; clear_bit(NVME_NS_ANA_PENDING, &ns->flags); if (nvme_state_is_live(ns->ana_state)) nvme_mpath_set_live(ns); - mutex_unlock(&ns->head->lock); } static int nvme_update_ana_state(struct nvme_ctrl 
*ctrl, @@ -640,31 +638,37 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr, } DEVICE_ATTR_RO(ana_state); -static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl, +static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *desc, void *data) { - struct nvme_ns *ns = data; + struct nvme_ana_group_desc *dst = data; - if (ns->ana_grpid == le32_to_cpu(desc->grpid)) { - nvme_update_ns_ana_state(desc, ns); - return -ENXIO; /* just break out of the loop */ - } + if (desc->grpid != dst->grpid) + return 0; - return 0; + *dst = *desc; + return -ENXIO; /* just break out of the loop */ } void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id) { if (nvme_ctrl_use_ana(ns->ctrl)) { + struct nvme_ana_group_desc desc = { + .grpid = id->anagrpid, + .state = 0, + }; + mutex_lock(&ns->ctrl->ana_lock); ns->ana_grpid = le32_to_cpu(id->anagrpid); - nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state); + nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); mutex_unlock(&ns->ctrl->ana_lock); + if (desc.state) { + /* found the group desc: update */ + nvme_update_ns_ana_state(&desc, ns); + } } else { - mutex_lock(&ns->head->lock); ns->ana_state = NVME_ANA_OPTIMIZED; nvme_mpath_set_live(ns); - mutex_unlock(&ns->head->lock); } if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) { @@ -686,6 +690,14 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); blk_cleanup_queue(head->disk->queue); + if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { + /* + * if device_add_disk wasn't called, prevent + * disk release to put a bogus reference on the + * request queue + */ + head->disk->queue = NULL; + } put_disk(head->disk); } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index c0f4226d3299..2ef8d501e2a8 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -364,6 +364,8 @@ struct nvme_ns_head { spinlock_t requeue_lock; struct work_struct requeue_work; struct mutex lock; + unsigned long flags; +#define NVME_NSHEAD_DISK_LIVE 0 struct nvme_ns __rcu *current_path[]; #endif }; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index e2bacd369a88..b1d18f0633c7 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1593,7 +1593,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH; dev->admin_tagset.timeout = ADMIN_TIMEOUT; - dev->admin_tagset.numa_node = dev_to_node(dev->dev); + dev->admin_tagset.numa_node = dev->ctrl.numa_node; dev->admin_tagset.cmd_size = sizeof(struct nvme_iod); dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED; dev->admin_tagset.driver_data = dev; @@ -1669,6 +1669,8 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) if (result) return result; + dev->ctrl.numa_node = dev_to_node(dev->dev); + nvmeq = &dev->queues[0]; aqa = nvmeq->q_depth - 1; aqa |= aqa << 16; @@ -2257,7 +2259,7 @@ static void nvme_dev_add(struct nvme_dev *dev) if (dev->io_queues[HCTX_TYPE_POLL]) dev->tagset.nr_maps++; dev->tagset.timeout = NVME_IO_TIMEOUT; - dev->tagset.numa_node = dev_to_node(dev->dev); + dev->tagset.numa_node = dev->ctrl.numa_node; dev->tagset.queue_depth = min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; dev->tagset.cmd_size = sizeof(struct nvme_iod); diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index f8f856dc0c67..13506a87a444 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c 
@@ -470,7 +470,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) * Spread I/O queues completion vectors according their queue index. * Admin queues can always go on completion vector 0. */ - comp_vector = idx == 0 ? idx : idx - 1; + comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors; /* Polling queues need direct cq polling context */ if (nvme_rdma_poll_queue(queue)) diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 3345ec7efaff..79ef2b8e2b3c 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1532,7 +1532,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl, set->ops = &nvme_tcp_admin_mq_ops; set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; set->reserved_tags = 2; /* connect + keep-alive */ - set->numa_node = NUMA_NO_NODE; + set->numa_node = nctrl->numa_node; set->flags = BLK_MQ_F_BLOCKING; set->cmd_size = sizeof(struct nvme_tcp_request); set->driver_data = ctrl; @@ -1544,7 +1544,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl, set->ops = &nvme_tcp_mq_ops; set->queue_depth = nctrl->sqsize + 1; set->reserved_tags = 1; /* fabric connect */ - set->numa_node = NUMA_NO_NODE; + set->numa_node = nctrl->numa_node; set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; set->cmd_size = sizeof(struct nvme_tcp_request); set->driver_data = ctrl; diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 0d54e730cbf2..6344e73c9354 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -340,7 +340,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops; ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */ - ctrl->admin_tag_set.numa_node = NUMA_NO_NODE; + ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) + NVME_INLINE_SG_CNT * sizeof(struct scatterlist); ctrl->admin_tag_set.driver_data = ctrl; @@ -512,7 +512,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) ctrl->tag_set.ops = &nvme_loop_mq_ops; ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; ctrl->tag_set.reserved_tags = 1; /* fabric connect */ - ctrl->tag_set.numa_node = NUMA_NO_NODE; + ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) + NVME_INLINE_SG_CNT * sizeof(struct scatterlist); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index a04afe79529c..ef6f818ce5b3 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -314,10 +314,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) child, addr); if (of_mdiobus_child_is_phy(child)) { + /* -ENODEV is the return code that PHYLIB has + * standardized on to indicate that bus + * scanning should continue. 
+ */ rc = of_mdiobus_register_phy(mdio, child, addr); - if (rc && rc != -ENODEV) + if (!rc) + break; + if (rc != -ENODEV) goto unregister; - break; } } } diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h index e1d097e250ae..31478c0cff87 100644 --- a/drivers/oprofile/cpu_buffer.h +++ b/drivers/oprofile/cpu_buffer.h @@ -33,7 +33,7 @@ void flush_cpu_work(void); struct op_sample { unsigned long eip; unsigned long event; - unsigned long data[0]; + unsigned long data[]; }; struct op_entry; diff --git a/drivers/phy/samsung/phy-samsung-usb2.h b/drivers/phy/samsung/phy-samsung-usb2.h index 2c1a7d71142b..77fb23bc218f 100644 --- a/drivers/phy/samsung/phy-samsung-usb2.h +++ b/drivers/phy/samsung/phy-samsung-usb2.h @@ -43,7 +43,7 @@ struct samsung_usb2_phy_driver { struct regmap *reg_pmu; struct regmap *reg_sys; spinlock_t lock; - struct samsung_usb2_phy_instance instances[0]; + struct samsung_usb2_phy_instance instances[]; }; struct samsung_usb2_common_phy { diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index cb7e0f08d2cf..1f81569c7ae3 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -824,13 +824,12 @@ int imx_pinctrl_probe(struct platform_device *pdev, return -EINVAL; } - ipctl->input_sel_base = devm_of_iomap(&pdev->dev, np, - 0, NULL); + ipctl->input_sel_base = of_iomap(np, 0); of_node_put(np); - if (IS_ERR(ipctl->input_sel_base)) { + if (!ipctl->input_sel_base) { dev_err(&pdev->dev, "iomuxc input select base address not found\n"); - return PTR_ERR(ipctl->input_sel_base); + return -ENOMEM; } } } diff --git a/drivers/pinctrl/pinctrl-mcp23s08_spi.c b/drivers/pinctrl/pinctrl-mcp23s08_spi.c index e06fb885fd2b..1f47a661b0a7 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08_spi.c +++ b/drivers/pinctrl/pinctrl-mcp23s08_spi.c @@ -126,10 +126,7 @@ static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev, copy->name = name; mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, copy); - if (IS_ERR(mcp->regmap)) - return PTR_ERR(mcp->regmap); - - return 0; + return PTR_ERR_OR_ZERO(mcp->regmap); } static int mcp23s08_probe(struct spi_device *spi) diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 1e0614daee9b..f3a8a465d27e 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -958,7 +958,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, } /** - * smux_parse_one_pinctrl_entry() - parses a device tree mux entry + * pcs_parse_one_pinctrl_entry() - parses a device tree mux entry * @pctldev: pin controller device * @pcs: pinctrl driver instance * @np: device node of the mux entry diff --git a/drivers/pinctrl/qcom/pinctrl-ipq6018.c b/drivers/pinctrl/qcom/pinctrl-ipq6018.c index 38c33a778cb8..ec50a3b4bd16 100644 --- a/drivers/pinctrl/qcom/pinctrl-ipq6018.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq6018.c @@ -367,7 +367,8 @@ static const char * const wci20_groups[] = { static const char * const qpic_pad_groups[] = { "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio9", "gpio10", - "gpio11", "gpio17", + "gpio11", "gpio17", "gpio15", "gpio12", "gpio13", "gpio14", "gpio5", + "gpio6", "gpio7", "gpio8", }; static const char * const burn0_groups[] = { diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index fe0be8a6ebb7..092a48e4dff5 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ 
-170,6 +170,7 @@ struct pmic_gpio_state { struct regmap *map; struct pinctrl_dev *ctrl; struct gpio_chip chip; + struct irq_chip irq; }; static const struct pinconf_generic_params pmic_gpio_bindings[] = { @@ -917,16 +918,6 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state, return 0; } -static struct irq_chip pmic_gpio_irq_chip = { - .name = "spmi-gpio", - .irq_ack = irq_chip_ack_parent, - .irq_mask = irq_chip_mask_parent, - .irq_unmask = irq_chip_unmask_parent, - .irq_set_type = irq_chip_set_type_parent, - .irq_set_wake = irq_chip_set_wake_parent, - .flags = IRQCHIP_MASK_ON_SUSPEND, -}; - static int pmic_gpio_domain_translate(struct irq_domain *domain, struct irq_fwspec *fwspec, unsigned long *hwirq, @@ -1053,8 +1044,16 @@ static int pmic_gpio_probe(struct platform_device *pdev) if (!parent_domain) return -ENXIO; + state->irq.name = "spmi-gpio", + state->irq.irq_ack = irq_chip_ack_parent, + state->irq.irq_mask = irq_chip_mask_parent, + state->irq.irq_unmask = irq_chip_unmask_parent, + state->irq.irq_set_type = irq_chip_set_type_parent, + state->irq.irq_set_wake = irq_chip_set_wake_parent, + state->irq.flags = IRQCHIP_MASK_ON_SUSPEND, + girq = &state->chip.irq; - girq->chip = &pmic_gpio_irq_chip; + girq->chip = &state->irq; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_level_irq; girq->fwnode = of_node_to_fwnode(state->dev->of_node); diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index 21661f6490d6..195cfe557511 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -731,8 +731,8 @@ static int tegra_pinctrl_resume(struct device *dev) } const struct dev_pm_ops tegra_pinctrl_pm = { - .suspend = &tegra_pinctrl_suspend, - .resume = &tegra_pinctrl_resume + .suspend_noirq = &tegra_pinctrl_suspend, + .resume_noirq = &tegra_pinctrl_resume }; static bool tegra_pinctrl_gpio_node_has_range(struct tegra_pmx *pmx) diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index 0e90c5d4bb2b..eb8ed28533f8 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -39,7 +39,7 @@ struct rio_id_table { u16 start; /* logical minimal id */ u32 max; /* max number of IDs in table */ spinlock_t lock; - unsigned long table[0]; + unsigned long table[]; }; static int next_destid = 0; diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 8f677f5d79b4..edb1c4f8b496 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -684,7 +684,7 @@ config REGULATOR_MT6323 config REGULATOR_MT6358 tristate "MediaTek MT6358 PMIC" - depends on MFD_MT6397 && BROKEN + depends on MFD_MT6397 help Say y here to select this option to enable the power regulator of MediaTek MT6358 PMIC. 
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c index e1d6c8f6d40b..fe65b5acaf28 100644 --- a/drivers/regulator/da9063-regulator.c +++ b/drivers/regulator/da9063-regulator.c @@ -512,7 +512,6 @@ static const struct da9063_regulator_info da9063_regulator_info[] = { }, { DA9063_LDO(DA9063, LDO9, 950, 50, 3600), - .suspend = BFIELD(DA9063_REG_LDO9_CONT, DA9063_VLDO9_SEL), }, { DA9063_LDO(DA9063, LDO11, 900, 50, 3600), diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c index e970e9d2f8be..e4bb09bbd3fa 100644 --- a/drivers/regulator/helpers.c +++ b/drivers/regulator/helpers.c @@ -486,7 +486,7 @@ int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev, continue; } - ret = selector + sel; + ret = selector + sel - range->min_sel; voltage = rdev->desc->ops->list_voltage(rdev, ret); diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index 689537927f6f..4c8e8b472287 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -209,6 +209,19 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = { }; +static const struct regulator_ops pfuze3000_sw_regulator_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_table, + .map_voltage = regulator_map_voltage_ascend, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_ramp_delay = pfuze100_set_ramp_delay, + +}; + #define PFUZE100_FIXED_REG(_chip, _name, base, voltage) \ [_chip ## _ ## _name] = { \ .desc = { \ @@ -318,23 +331,28 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = { .stby_mask = 0x20, \ } - -#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \ - .desc = { \ - .name = #_name,\ - .n_voltages = ((max) - (min)) / (step) + 1, \ - .ops = &pfuze100_sw_regulator_ops, \ - .type = REGULATOR_VOLTAGE, \ - .id = _chip ## _ ## _name, \ - .owner = THIS_MODULE, \ - .min_uV = (min), \ - .uV_step = (step), \ - .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \ - .vsel_mask = 0x7, \ - }, \ - .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \ - .stby_mask = 0x7, \ -} +/* No linear case for some of the switches of PFUZE3000 */ +#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages) \ + [_chip ## _ ## _name] = { \ + .desc = { \ + .name = #_name, \ + .n_voltages = ARRAY_SIZE(voltages), \ + .ops = &pfuze3000_sw_regulator_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = _chip ## _ ## _name, \ + .owner = THIS_MODULE, \ + .volt_table = voltages, \ + .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \ + .vsel_mask = (mask), \ + .enable_reg = (base) + PFUZE100_MODE_OFFSET, \ + .enable_mask = 0xf, \ + .enable_val = 0x8, \ + .enable_time = 500, \ + }, \ + .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \ + .stby_mask = (mask), \ + .sw_reg = true, \ + } #define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \ .desc = { \ @@ -391,9 +409,9 @@ static struct pfuze_regulator pfuze200_regulators[] = { }; static struct pfuze_regulator pfuze3000_regulators[] = { - PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), + PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000), - PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+ PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000), PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst), PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs), @@ -407,8 +425,8 @@ static struct pfuze_regulator pfuze3000_regulators[] = { }; static struct pfuze_regulator pfuze3001_regulators[] = { - PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), - PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), + PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a), + PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo), PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000), PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs), PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000), diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index eb13c479e11d..bb1c8402c67d 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -182,10 +182,9 @@ enum qdio_irq_poll_states { }; struct qdio_input_q { - /* first ACK'ed buffer */ - int ack_start; - /* how many SBALs are acknowledged */ - int ack_count; + /* Batch of SBALs that we processed while polling the queue: */ + unsigned int batch_start; + unsigned int batch_count; /* last time of noticing incoming data */ u64 timestamp; }; diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index 286b044fb027..da95c923d81a 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -110,8 +110,8 @@ static int qstat_show(struct seq_file *m, void *v) seq_printf(m, "nr_used: %d ftc: %d\n", atomic_read(&q->nr_buf_used), q->first_to_check); if (q->is_input_q) { - seq_printf(m, "ack start: %d ack count: %d\n", - q->u.in.ack_start, q->u.in.ack_count); + seq_printf(m, "batch start: %u batch count: %u\n", + q->u.in.batch_start, q->u.in.batch_count); seq_printf(m, "DSCI: %x IRQs disabled: %u\n", *(u8 *)q->irq_ptr->dsci, test_bit(QDIO_IRQ_DISABLED, diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 610c05f59589..0c919a11a46e 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -254,10 +254,17 @@ static inline int set_buf_states(struct qdio_q *q, int bufnr, if (is_qebsm(q)) return qdio_do_sqbs(q, state, bufnr, count); + /* Ensure that all preceding changes to the SBALs are visible: */ + mb(); + for (i = 0; i < count; i++) { - xchg(&q->slsb.val[bufnr], state); + WRITE_ONCE(q->slsb.val[bufnr], state); bufnr = next_buf(bufnr); } + + /* Make our SLSB changes visible: */ + mb(); + return count; } @@ -393,15 +400,15 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, static inline void qdio_stop_polling(struct qdio_q *q) { - if (!q->u.in.ack_count) + if (!q->u.in.batch_count) return; qperf_inc(q, stop_polling); /* show the card that we are not polling anymore */ - set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, - q->u.in.ack_count); - q->u.in.ack_count = 0; + set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT, + q->u.in.batch_count); + q->u.in.batch_count = 0; } static inline void account_sbals(struct qdio_q *q, unsigned int count) @@ -441,42 +448,13 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start, static inline void inbound_handle_work(struct qdio_q *q, unsigned int start, int count, bool auto_ack) { - int new; 
+ /* ACK the newest SBAL: */ + if (!auto_ack) + set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK); - if (auto_ack) { - if (!q->u.in.ack_count) { - q->u.in.ack_count = count; - q->u.in.ack_start = start; - return; - } - - /* delete the previous ACK's */ - set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, - q->u.in.ack_count); - q->u.in.ack_count = count; - q->u.in.ack_start = start; - return; - } - - /* - * ACK the newest buffer. The ACK will be removed in qdio_stop_polling - * or by the next inbound run. - */ - new = add_buf(start, count - 1); - set_buf_state(q, new, SLSB_P_INPUT_ACK); - - /* delete the previous ACKs */ - if (q->u.in.ack_count) - set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, - q->u.in.ack_count); - - q->u.in.ack_count = 1; - q->u.in.ack_start = new; - count--; - if (!count) - return; - /* need to change ALL buffers to get more interrupts */ - set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count); + if (!q->u.in.batch_count) + q->u.in.batch_start = start; + q->u.in.batch_count += count; } static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start) @@ -525,15 +503,18 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start) account_sbals_error(q, count); return count; case SLSB_CU_INPUT_EMPTY: - case SLSB_P_INPUT_NOT_INIT: - case SLSB_P_INPUT_ACK: if (q->irq_ptr->perf_stat_enabled) q->q_stats.nr_sbal_nop++; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x", q->nr, start); return 0; + case SLSB_P_INPUT_NOT_INIT: + case SLSB_P_INPUT_ACK: + /* We should never see this state, throw a WARN: */ default: - WARN_ON_ONCE(1); + dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1, + "found state %#x at index %u on queue %u\n", + state, start, q->nr); return 0; } } @@ -738,11 +719,14 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start) DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); return 0; - case SLSB_P_OUTPUT_NOT_INIT: case SLSB_P_OUTPUT_HALTED: return 0; + case SLSB_P_OUTPUT_NOT_INIT: + /* We should never see this state, throw a WARN: */ default: - WARN_ON_ONCE(1); + dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1, + "found state %#x at index %u on queue %u\n", + state, start, q->nr); return 0; } } @@ -938,10 +922,10 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) } } -static void qdio_handle_activate_check(struct ccw_device *cdev, - unsigned long intparm, int cstat, int dstat) +static void qdio_handle_activate_check(struct qdio_irq *irq_ptr, + unsigned long intparm, int cstat, + int dstat) { - struct qdio_irq *irq_ptr = cdev->private->qdio_data; struct qdio_q *q; DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); @@ -968,11 +952,9 @@ static void qdio_handle_activate_check(struct ccw_device *cdev, lgr_info_log(); } -static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, +static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat, int dstat) { - struct qdio_irq *irq_ptr = cdev->private->qdio_data; - DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); if (cstat) @@ -1019,7 +1001,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, switch (irq_ptr->state) { case QDIO_IRQ_STATE_INACTIVE: - qdio_establish_handle_irq(cdev, cstat, dstat); + qdio_establish_handle_irq(irq_ptr, cstat, dstat); break; case QDIO_IRQ_STATE_CLEANUP: qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); @@ -1031,7 +1013,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, return; } if (cstat || dstat) - 
qdio_handle_activate_check(cdev, intparm, cstat, + qdio_handle_activate_check(irq_ptr, intparm, cstat, dstat); break; case QDIO_IRQ_STATE_STOPPED: @@ -1446,12 +1428,12 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags, qperf_inc(q, inbound_call); - /* If any ACKed SBALs are returned to HW, adjust ACK tracking: */ - overlap = min(count - sub_buf(q->u.in.ack_start, bufnr), - q->u.in.ack_count); + /* If any processed SBALs are returned to HW, adjust our tracking: */ + overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr), + q->u.in.batch_count); if (overlap > 0) { - q->u.in.ack_start = add_buf(q->u.in.ack_start, overlap); - q->u.in.ack_count -= overlap; + q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap); + q->u.in.batch_count -= overlap; } count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); @@ -1535,12 +1517,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr, unsigned int bufnr, unsigned int count) { - struct qdio_irq *irq_ptr; + struct qdio_irq *irq_ptr = cdev->private->qdio_data; if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) return -EINVAL; - irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c index 004ce022fc78..3c3d403abe92 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.c +++ b/drivers/s390/crypto/zcrypt_ep11misc.c @@ -195,11 +195,10 @@ static inline struct ep11_cprb *alloc_cprb(size_t payload_len) size_t len = sizeof(struct ep11_cprb) + payload_len; struct ep11_cprb *cprb; - cprb = kmalloc(len, GFP_KERNEL); + cprb = kzalloc(len, GFP_KERNEL); if (!cprb) return NULL; - memset(cprb, 0, len); cprb->cprb_len = sizeof(struct ep11_cprb); cprb->cprb_ver_id = 0x04; memcpy(cprb->func_id, "T4", 2); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 18a0fb75a710..88e998de2d03 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4544,9 +4544,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, int fallback = *(int *)reply->param; QETH_CARD_TEXT(card, 4, "setaccb"); - if (cmd->hdr.return_code) - return -EIO; - qeth_setadpparms_inspect_rc(cmd); access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; QETH_CARD_TEXT_(card, 2, "rc=%d", @@ -4556,7 +4553,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", access_ctrl_req->subcmd_code, CARD_DEVID(card), cmd->data.setadapterparms.hdr.return_code); - switch (cmd->data.setadapterparms.hdr.return_code) { + switch (qeth_setadpparms_inspect_rc(cmd)) { case SET_ACCESS_CTRL_RC_SUCCESS: if (card->options.isolation == ISOLATION_MODE_NONE) { dev_info(&card->gdev->dev, @@ -6840,9 +6837,11 @@ netdev_features_t qeth_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { + struct qeth_card *card = dev->ml_priv; + /* Traffic with local next-hop is not eligible for some offloads: */ - if (skb->ip_summed == CHECKSUM_PARTIAL) { - struct qeth_card *card = dev->ml_priv; + if (skb->ip_summed == CHECKSUM_PARTIAL && + card->options.isolation != ISOLATION_MODE_FWD) { netdev_features_t restricted = 0; if (skb_is_gso(skb) && !netif_needs_gso(skb, features)) diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index db320dab1fee..79f6e8fb03ca 100644 --- 
a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -577,7 +577,10 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) ZFCP_STATUS_ERP_TIMEDOUT)) { req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; zfcp_dbf_rec_run("erscf_1", act); - req->erp_action = NULL; + /* lock-free concurrent access with + * zfcp_erp_timeout_handler() + */ + WRITE_ONCE(req->erp_action, NULL); } if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) zfcp_dbf_rec_run("erscf_2", act); @@ -613,8 +616,14 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask) void zfcp_erp_timeout_handler(struct timer_list *t) { struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer); - struct zfcp_erp_action *act = fsf_req->erp_action; + struct zfcp_erp_action *act; + if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) + return; + /* lock-free concurrent access with zfcp_erp_strategy_check_fsfreq() */ + act = READ_ONCE(fsf_req->erp_action); + if (!act) + return; zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT); } diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 957889a42d2e..5730572b52cd 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -1372,27 +1372,6 @@ static struct ccw_device_id virtio_ids[] = { {}, }; -#ifdef CONFIG_PM_SLEEP -static int virtio_ccw_freeze(struct ccw_device *cdev) -{ - struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); - - return virtio_device_freeze(&vcdev->vdev); -} - -static int virtio_ccw_restore(struct ccw_device *cdev) -{ - struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); - int ret; - - ret = virtio_ccw_set_transport_rev(vcdev); - if (ret) - return ret; - - return virtio_device_restore(&vcdev->vdev); -} -#endif - static struct ccw_driver virtio_ccw_driver = { .driver = { .owner = THIS_MODULE, @@ -1405,11 +1384,6 @@ static struct ccw_driver virtio_ccw_driver = { .set_online = virtio_ccw_online, .notify = virtio_ccw_cio_notify, .int_class = IRQIO_VIR, -#ifdef CONFIG_PM_SLEEP - .freeze = virtio_ccw_freeze, - .thaw = virtio_ccw_restore, - .restore = virtio_ccw_restore, -#endif }; static int __init pure_hex(char **cp, unsigned int *val, int min_digit, diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index d022407e5645..bef47f38dd0d 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -40,6 +40,7 @@ static struct scsi_host_template aic94xx_sht = { /* .name is initialized */ .name = "aic94xx", .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, .target_alloc = sas_target_alloc, .slave_configure = sas_slave_configure, .scan_finished = asd_scan_finished, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 2e1718f9ade2..09a7669dad4c 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -1756,6 +1756,7 @@ static struct scsi_host_template sht_v1_hw = { .proc_name = DRV_NAME, .module = THIS_MODULE, .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, .target_alloc = sas_target_alloc, .slave_configure = hisi_sas_slave_configure, .scan_finished = hisi_sas_scan_finished, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index e7e7849a4c14..968d38702353 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -3532,6 +3532,7 @@ static struct scsi_host_template 
sht_v2_hw = { .proc_name = DRV_NAME, .module = THIS_MODULE, .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, .target_alloc = sas_target_alloc, .slave_configure = hisi_sas_slave_configure, .scan_finished = hisi_sas_scan_finished, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 3e6b78a1f993..55e2321a65bc 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -3075,6 +3075,7 @@ static struct scsi_host_template sht_v3_hw = { .proc_name = DRV_NAME, .module = THIS_MODULE, .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, .target_alloc = sas_target_alloc, .slave_configure = hisi_sas_slave_configure, .scan_finished = hisi_sas_scan_finished, diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 7d77997d26d4..7d86f4ca266c 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -6731,6 +6731,7 @@ static struct scsi_host_template driver_template = { .compat_ioctl = ipr_ioctl, #endif .queuecommand = ipr_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, .eh_abort_handler = ipr_eh_abort, .eh_device_reset_handler = ipr_eh_dev_reset, .eh_host_reset_handler = ipr_eh_host_reset, diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 974c3b9116d5..085e285f427d 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -153,6 +153,7 @@ static struct scsi_host_template isci_sht = { .name = DRV_NAME, .proc_name = DRV_NAME, .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, .target_alloc = sas_target_alloc, .slave_configure = sas_slave_configure, .scan_finished = isci_host_scan_finished, diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 69a5249e007a..6637f84a3d1b 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -11878,7 +11878,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) lpfc_sli4_xri_exchange_busy_wait(phba); /* per-phba callback de-registration for hotplug event */ - lpfc_cpuhp_remove(phba); + if (phba->pport) + lpfc_cpuhp_remove(phba); /* Disable PCI subsystem interrupt */ lpfc_sli4_disable_intr(phba); diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 5973eed94938..b0de3bdb01db 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -33,6 +33,7 @@ static struct scsi_host_template mvs_sht = { .module = THIS_MODULE, .name = DRV_NAME, .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, .target_alloc = sas_target_alloc, .slave_configure = sas_slave_configure, .scan_finished = mvs_scan_finished, diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index a8f5344fdfda..9e99262a2b9d 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -87,6 +87,7 @@ static struct scsi_host_template pm8001_sht = { .module = THIS_MODULE, .name = DRV_NAME, .queuecommand = sas_queuecommand, + .dma_need_drain = ata_scsi_dma_need_drain, .target_alloc = sas_target_alloc, .slave_configure = sas_slave_configure, .scan_finished = pm8001_scan_finished, diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 42c3ad27f1cb..df670fba2ab8 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3496,7 +3496,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) qla2x00_clear_loop_id(fcport); fcport->flags |= FCF_FABRIC_DEVICE; } else if 
(fcport->d_id.b24 != rp->id.b24 || - fcport->scan_needed) { + (fcport->scan_needed && + fcport->port_type != FCT_INITIATOR && + fcport->port_type != FCT_NVME_INITIATOR)) { qlt_schedule_sess_for_deletion(fcport); } fcport->d_id.b24 = rp->id.b24; diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index d66d47a0f958..fa695a4007f8 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -139,11 +139,12 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref) sp->priv = NULL; if (priv->comp_status == QLA_SUCCESS) { fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len); + fd->status = NVME_SC_SUCCESS; } else { fd->rcv_rsplen = 0; fd->transferred_length = 0; + fd->status = NVME_SC_INTERNAL; } - fd->status = 0; spin_unlock_irqrestore(&priv->cmd_lock, flags); fd->done(fd); diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c index 53dd87628cbe..516a7f573942 100644 --- a/drivers/scsi/ufs/ufs_bsg.c +++ b/drivers/scsi/ufs/ufs_bsg.c @@ -106,8 +106,10 @@ static int ufs_bsg_request(struct bsg_job *job) desc_op = bsg_request->upiu_req.qr.opcode; ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff, &desc_len, desc_op); - if (ret) + if (ret) { + pm_runtime_put_sync(hba->dev); goto out; + } /* fall through */ case UPIU_TRANSACTION_NOP_OUT: diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c index 7b0759adb47d..cc57a384d74d 100644 --- a/drivers/soc/imx/soc-imx8m.c +++ b/drivers/soc/imx/soc-imx8m.c @@ -22,6 +22,8 @@ #define OCOTP_UID_LOW 0x410 #define OCOTP_UID_HIGH 0x420 +#define IMX8MP_OCOTP_UID_OFFSET 0x10 + /* Same as ANADIG_DIGPROG_IMX7D */ #define ANADIG_DIGPROG_IMX8MM 0x800 @@ -87,6 +89,8 @@ static void __init imx8mm_soc_uid(void) { void __iomem *ocotp_base; struct device_node *np; + u32 offset = of_machine_is_compatible("fsl,imx8mp") ? 
+ IMX8MP_OCOTP_UID_OFFSET : 0; np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp"); if (!np) @@ -95,9 +99,9 @@ static void __init imx8mm_soc_uid(void) ocotp_base = of_iomap(np, 0); WARN_ON(!ocotp_base); - soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH); + soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset); soc_uid <<= 32; - soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW); + soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset); iounmap(ocotp_base); of_node_put(np); @@ -146,7 +150,7 @@ static const struct imx8_soc_data imx8mp_soc_data = { .soc_revision = imx8mm_soc_revision, }; -static const struct of_device_id imx8_soc_match[] = { +static __maybe_unused const struct of_device_id imx8_soc_match[] = { { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, }, { .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, }, { .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, }, diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h index 038aec352df7..a01eda720bf6 100644 --- a/drivers/soc/ti/knav_qmss.h +++ b/drivers/soc/ti/knav_qmss.h @@ -67,7 +67,7 @@ struct knav_reg_config { u32 link_ram_size0; u32 link_ram_base1; u32 __pad2[2]; - u32 starvation[0]; + u32 starvation[]; }; struct knav_reg_region { diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c index 96c6f777519c..c9b3f9ebf0bb 100644 --- a/drivers/soc/ti/omap_prm.c +++ b/drivers/soc/ti/omap_prm.c @@ -256,10 +256,10 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev, goto exit; /* wait for the status to be set */ - ret = readl_relaxed_poll_timeout(reset->prm->base + - reset->prm->data->rstst, - v, v & BIT(st_bit), 1, - OMAP_RESET_MAX_WAIT); + ret = readl_relaxed_poll_timeout_atomic(reset->prm->base + + reset->prm->data->rstst, + v, v & BIT(st_bit), 1, + OMAP_RESET_MAX_WAIT); if (ret) pr_err("%s: timedout waiting for %s:%lu\n", __func__, reset->prm->data->name, id); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index a35faced0456..58190c94561f 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -588,14 +588,14 @@ static void dspi_release_dma(struct fsl_dspi *dspi) return; if (dma->chan_tx) { - dma_unmap_single(dma->chan_tx->device->dev, dma->tx_dma_phys, - dma_bufsize, DMA_TO_DEVICE); + dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize, + dma->tx_dma_buf, dma->tx_dma_phys); dma_release_channel(dma->chan_tx); } if (dma->chan_rx) { - dma_unmap_single(dma->chan_rx->device->dev, dma->rx_dma_phys, - dma_bufsize, DMA_FROM_DEVICE); + dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize, + dma->rx_dma_buf, dma->rx_dma_phys); dma_release_channel(dma->chan_rx); } } diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 06192c9ea813..cbc2387d450c 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -179,7 +179,7 @@ struct rspi_data { void __iomem *addr; - u32 max_speed_hz; + u32 speed_hz; struct spi_controller *ctlr; struct platform_device *pdev; wait_queue_head_t wait; @@ -258,8 +258,7 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size) rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); /* Sets transfer bit rate */ - spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), - 2 * rspi->max_speed_hz) - 1; + spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz) - 1; rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); /* Disable dummy transmission, set 16-bit word access, 1 frame */ @@ -299,14 +298,14 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, 
int access_size) clksrc = clk_get_rate(rspi->clk); while (div < 3) { - if (rspi->max_speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */ + if (rspi->speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */ break; div++; clksrc /= 2; } /* Sets transfer bit rate */ - spbr = DIV_ROUND_UP(clksrc, 2 * rspi->max_speed_hz) - 1; + spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz) - 1; rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); rspi->spcmd |= div << 2; @@ -341,7 +340,7 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size) rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); /* Sets transfer bit rate */ - spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz); + spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz); rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); /* Disable dummy transmission, set byte access */ @@ -949,9 +948,24 @@ static int rspi_prepare_message(struct spi_controller *ctlr, { struct rspi_data *rspi = spi_controller_get_devdata(ctlr); struct spi_device *spi = msg->spi; + const struct spi_transfer *xfer; int ret; - rspi->max_speed_hz = spi->max_speed_hz; + /* + * As the Bit Rate Register must not be changed while the device is + * active, all transfers in a message must use the same bit rate. + * In theory, the sequencer could be enabled, and each Command Register + * could divide the base bit rate by a different value. + * However, most RSPI variants do not have Transfer Data Length + * Multiplier Setting Registers, so each sequence step would be limited + * to a single word, making this feature unsuitable for large + * transfers, which would gain most from it. + */ + rspi->speed_hz = spi->max_speed_hz; + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (xfer->speed_hz < rspi->speed_hz) + rspi->speed_hz = xfer->speed_hz; + } rspi->spcmd = SPCMD_SSLKP; if (spi->mode & SPI_CPOL) diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c index 88e6543648cb..bd23c4689b46 100644 --- a/drivers/spi/spi-sprd-adi.c +++ b/drivers/spi/spi-sprd-adi.c @@ -389,9 +389,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this, sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val); /* Load the watchdog timeout value, 50ms is always enough. 
*/ + sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0); sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW, WDG_LOAD_VAL & WDG_LOAD_MASK); - sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0); /* Start the watchdog to reset system */ sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val); diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index 3c44bb2fd9b1..a900962b4336 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -553,20 +553,6 @@ static const struct spi_controller_mem_ops stm32_qspi_mem_ops = { .exec_op = stm32_qspi_exec_op, }; -static void stm32_qspi_release(struct stm32_qspi *qspi) -{ - pm_runtime_get_sync(qspi->dev); - /* disable qspi */ - writel_relaxed(0, qspi->io_base + QSPI_CR); - stm32_qspi_dma_free(qspi); - mutex_destroy(&qspi->lock); - pm_runtime_put_noidle(qspi->dev); - pm_runtime_disable(qspi->dev); - pm_runtime_set_suspended(qspi->dev); - pm_runtime_dont_use_autosuspend(qspi->dev); - clk_disable_unprepare(qspi->clk); -} - static int stm32_qspi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -642,7 +628,7 @@ static int stm32_qspi_probe(struct platform_device *pdev) if (IS_ERR(rstc)) { ret = PTR_ERR(rstc); if (ret == -EPROBE_DEFER) - goto err_qspi_release; + goto err_clk_disable; } else { reset_control_assert(rstc); udelay(2); @@ -653,7 +639,7 @@ static int stm32_qspi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, qspi); ret = stm32_qspi_dma_setup(qspi); if (ret) - goto err_qspi_release; + goto err_dma_free; mutex_init(&qspi->lock); @@ -673,15 +659,26 @@ static int stm32_qspi_probe(struct platform_device *pdev) ret = devm_spi_register_master(dev, ctrl); if (ret) - goto err_qspi_release; + goto err_pm_runtime_free; pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return 0; -err_qspi_release: - stm32_qspi_release(qspi); +err_pm_runtime_free: + pm_runtime_get_sync(qspi->dev); + /* disable qspi */ + writel_relaxed(0, qspi->io_base + QSPI_CR); + mutex_destroy(&qspi->lock); + pm_runtime_put_noidle(qspi->dev); + pm_runtime_disable(qspi->dev); + pm_runtime_set_suspended(qspi->dev); + pm_runtime_dont_use_autosuspend(qspi->dev); +err_dma_free: + stm32_qspi_dma_free(qspi); +err_clk_disable: + clk_disable_unprepare(qspi->clk); err_master_put: spi_master_put(qspi->ctrl); @@ -692,7 +689,16 @@ static int stm32_qspi_remove(struct platform_device *pdev) { struct stm32_qspi *qspi = platform_get_drvdata(pdev); - stm32_qspi_release(qspi); + pm_runtime_get_sync(qspi->dev); + /* disable qspi */ + writel_relaxed(0, qspi->io_base + QSPI_CR); + stm32_qspi_dma_free(qspi); + mutex_destroy(&qspi->lock); + pm_runtime_put_noidle(qspi->dev); + pm_runtime_disable(qspi->dev); + pm_runtime_set_suspended(qspi->dev); + pm_runtime_dont_use_autosuspend(qspi->dev); + clk_disable_unprepare(qspi->clk); return 0; } diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index d753df700e9e..59e07675ef86 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -609,15 +609,20 @@ static int spidev_open(struct inode *inode, struct file *filp) static int spidev_release(struct inode *inode, struct file *filp) { struct spidev_data *spidev; + int dofree; mutex_lock(&device_list_lock); spidev = filp->private_data; filp->private_data = NULL; + spin_lock_irq(&spidev->spi_lock); + /* ... after we unbound from the underlying device? */ + dofree = (spidev->spi == NULL); + spin_unlock_irq(&spidev->spi_lock); + /* last close? 
*/ spidev->users--; if (!spidev->users) { - int dofree; kfree(spidev->tx_buffer); spidev->tx_buffer = NULL; @@ -625,19 +630,14 @@ static int spidev_release(struct inode *inode, struct file *filp) kfree(spidev->rx_buffer); spidev->rx_buffer = NULL; - spin_lock_irq(&spidev->spi_lock); - if (spidev->spi) - spidev->speed_hz = spidev->spi->max_speed_hz; - - /* ... after we unbound from the underlying device? */ - dofree = (spidev->spi == NULL); - spin_unlock_irq(&spidev->spi_lock); - if (dofree) kfree(spidev); + else + spidev->speed_hz = spidev->spi->max_speed_hz; } #ifdef CONFIG_SPI_SLAVE - spi_slave_abort(spidev->spi); + if (!dofree) + spi_slave_abort(spidev->spi); #endif mutex_unlock(&device_list_lock); @@ -787,13 +787,13 @@ static int spidev_remove(struct spi_device *spi) { struct spidev_data *spidev = spi_get_drvdata(spi); + /* prevent new opens */ + mutex_lock(&device_list_lock); /* make sure ops on existing fds can abort cleanly */ spin_lock_irq(&spidev->spi_lock); spidev->spi = NULL; spin_unlock_irq(&spidev->spi_lock); - /* prevent new opens */ - mutex_lock(&device_list_lock); list_del(&spidev->device_entry); device_destroy(spidev_class, spidev->devt); clear_bit(MINOR(spidev->devt), minors); diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c index 69bcd172b298..a3ea7ce3e12e 100644 --- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c +++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c @@ -1824,12 +1824,14 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len); if (!pIE) return _FAIL; + if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates)) + return _FAIL; memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len); supportRateNum = ie_len; pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len); - if (pIE) + if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum)) memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len); return _SUCCESS; diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c index 893b67f2f792..5110f9b93762 100644 --- a/drivers/staging/wfx/hif_tx.c +++ b/drivers/staging/wfx/hif_tx.c @@ -240,7 +240,7 @@ int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, } int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req, - int chan_start_idx, int chan_num) + int chan_start_idx, int chan_num, int *timeout) { int ret, i; struct hif_msg *hif; @@ -289,11 +289,13 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req, tmo_chan_fg = 512 * USEC_PER_TU + body->probe_delay; tmo_chan_fg *= body->num_of_probe_requests; tmo = chan_num * max(tmo_chan_bg, tmo_chan_fg) + 512 * USEC_PER_TU; + if (timeout) + *timeout = usecs_to_jiffies(tmo); wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len); ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false); kfree(hif); - return ret ? 
ret : usecs_to_jiffies(tmo); + return ret; } int hif_stop_scan(struct wfx_vif *wvif) diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h index e9eca9330178..e1da28aef706 100644 --- a/drivers/staging/wfx/hif_tx.h +++ b/drivers/staging/wfx/hif_tx.h @@ -42,7 +42,7 @@ int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *buf, size_t buf_size); int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211, - int chan_start, int chan_num); + int chan_start, int chan_num, int *timeout); int hif_stop_scan(struct wfx_vif *wvif); int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf, struct ieee80211_channel *channel, const u8 *ssid, int ssidlen); diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c index 3248ecefda56..93ea2b72febd 100644 --- a/drivers/staging/wfx/queue.c +++ b/drivers/staging/wfx/queue.c @@ -246,7 +246,7 @@ static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev) for (i = 0; i < IEEE80211_NUM_ACS; i++) { sorted_queues[i] = &wdev->tx_queue[i]; for (j = i; j > 0; j--) - if (atomic_read(&sorted_queues[j]->pending_frames) > + if (atomic_read(&sorted_queues[j]->pending_frames) < atomic_read(&sorted_queues[j - 1]->pending_frames)) swap(sorted_queues[j - 1], sorted_queues[j]); } @@ -291,15 +291,12 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev) if (atomic_read(&wdev->tx_lock)) return NULL; - - for (;;) { - skb = wfx_tx_queues_get_skb(wdev); - if (!skb) - return NULL; - skb_queue_tail(&wdev->tx_pending, skb); - wake_up(&wdev->tx_dequeue); - tx_priv = wfx_skb_tx_priv(skb); - tx_priv->xmit_timestamp = ktime_get(); - return (struct hif_msg *)skb->data; - } + skb = wfx_tx_queues_get_skb(wdev); + if (!skb) + return NULL; + skb_queue_tail(&wdev->tx_pending, skb); + wake_up(&wdev->tx_dequeue); + tx_priv = wfx_skb_tx_priv(skb); + tx_priv->xmit_timestamp = ktime_get(); + return (struct hif_msg *)skb->data; } diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c index 57ea9997800b..e9de19784865 100644 --- a/drivers/staging/wfx/scan.c +++ b/drivers/staging/wfx/scan.c @@ -56,10 +56,10 @@ static int send_scan_req(struct wfx_vif *wvif, wfx_tx_lock_flush(wvif->wdev); wvif->scan_abort = false; reinit_completion(&wvif->scan_complete); - timeout = hif_scan(wvif, req, start_idx, i - start_idx); - if (timeout < 0) { + ret = hif_scan(wvif, req, start_idx, i - start_idx, &timeout); + if (ret) { wfx_tx_unlock(wvif->wdev); - return timeout; + return -EIO; } ret = wait_for_completion_timeout(&wvif->scan_complete, timeout); if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower) diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c index 5022447afa23..6004c0c1d173 100644 --- a/drivers/tty/serial/kgdb_nmi.c +++ b/drivers/tty/serial/kgdb_nmi.c @@ -50,7 +50,7 @@ static int kgdb_nmi_console_setup(struct console *co, char *options) * I/O utilities that messages sent to the console will automatically * be displayed on the dbg_io. 
*/ - dbg_io_ops->is_console = true; + dbg_io_ops->cons = co; return 0; } diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index 41396982e9e0..84ffede27f23 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c @@ -45,7 +45,6 @@ static struct platform_device *kgdboc_pdev; #if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) static struct kgdb_io kgdboc_earlycon_io_ops; -static struct console *earlycon; static int (*earlycon_orig_exit)(struct console *con); #endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */ @@ -145,7 +144,7 @@ static void kgdboc_unregister_kbd(void) #if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) static void cleanup_earlycon(void) { - if (earlycon) + if (kgdboc_earlycon_io_ops.cons) kgdb_unregister_io_module(&kgdboc_earlycon_io_ops); } #else /* !IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */ @@ -178,7 +177,7 @@ static int configure_kgdboc(void) goto noconfig; } - kgdboc_io_ops.is_console = 0; + kgdboc_io_ops.cons = NULL; kgdb_tty_driver = NULL; kgdboc_use_kms = 0; @@ -198,7 +197,7 @@ static int configure_kgdboc(void) int idx; if (cons->device && cons->device(cons, &idx) == p && idx == tty_line) { - kgdboc_io_ops.is_console = 1; + kgdboc_io_ops.cons = cons; break; } } @@ -433,7 +432,8 @@ static int kgdboc_earlycon_get_char(void) { char c; - if (!earlycon->read(earlycon, &c, 1)) + if (!kgdboc_earlycon_io_ops.cons->read(kgdboc_earlycon_io_ops.cons, + &c, 1)) return NO_POLL_CHAR; return c; @@ -441,7 +441,8 @@ static int kgdboc_earlycon_get_char(void) static void kgdboc_earlycon_put_char(u8 chr) { - earlycon->write(earlycon, &chr, 1); + kgdboc_earlycon_io_ops.cons->write(kgdboc_earlycon_io_ops.cons, &chr, + 1); } static void kgdboc_earlycon_pre_exp_handler(void) @@ -461,7 +462,7 @@ static void kgdboc_earlycon_pre_exp_handler(void) * boot if we detect this case. */ for_each_console(con) - if (con == earlycon) + if (con == kgdboc_earlycon_io_ops.cons) return; already_warned = true; @@ -484,25 +485,25 @@ static int kgdboc_earlycon_deferred_exit(struct console *con) static void kgdboc_earlycon_deinit(void) { - if (!earlycon) + if (!kgdboc_earlycon_io_ops.cons) return; - if (earlycon->exit == kgdboc_earlycon_deferred_exit) + if (kgdboc_earlycon_io_ops.cons->exit == kgdboc_earlycon_deferred_exit) /* * kgdboc_earlycon is exiting but original boot console exit * was never called (AKA kgdboc_earlycon_deferred_exit() * didn't ever run). Undo our trap. */ - earlycon->exit = earlycon_orig_exit; - else if (earlycon->exit) + kgdboc_earlycon_io_ops.cons->exit = earlycon_orig_exit; + else if (kgdboc_earlycon_io_ops.cons->exit) /* * We skipped calling the exit() routine so we could try to * keep using the boot console even after it went away. We're * finally done so call the function now. 
*/ - earlycon->exit(earlycon); + kgdboc_earlycon_io_ops.cons->exit(kgdboc_earlycon_io_ops.cons); - earlycon = NULL; + kgdboc_earlycon_io_ops.cons = NULL; } static struct kgdb_io kgdboc_earlycon_io_ops = { @@ -511,7 +512,6 @@ static struct kgdb_io kgdboc_earlycon_io_ops = { .write_char = kgdboc_earlycon_put_char, .pre_exception = kgdboc_earlycon_pre_exp_handler, .deinit = kgdboc_earlycon_deinit, - .is_console = true, }; #define MAX_CONSOLE_NAME_LEN (sizeof((struct console *) 0)->name) @@ -557,10 +557,10 @@ static int __init kgdboc_earlycon_init(char *opt) goto unlock; } - earlycon = con; + kgdboc_earlycon_io_ops.cons = con; pr_info("Going to register kgdb with earlycon '%s'\n", con->name); if (kgdb_register_io_module(&kgdboc_earlycon_io_ops) != 0) { - earlycon = NULL; + kgdboc_earlycon_io_ops.cons = NULL; pr_info("Failed to register kgdb with earlycon\n"); } else { /* Trap exit so we can keep earlycon longer if needed. */ diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c index 04a522f5ae58..4bca730f6876 100644 --- a/drivers/usb/cdns3/ep0.c +++ b/drivers/usb/cdns3/ep0.c @@ -327,7 +327,8 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev, if (!set || (tmode & 0xff) != 0) return -EINVAL; - switch (tmode >> 8) { + tmode >>= 8; + switch (tmode) { case USB_TEST_J: case USB_TEST_K: case USB_TEST_SE0_NAK: @@ -704,15 +705,17 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep, int ret = 0; u8 zlp = 0; + spin_lock_irqsave(&priv_dev->lock, flags); trace_cdns3_ep0_queue(priv_dev, request); /* cancel the request if controller receive new SETUP packet. */ - if (cdns3_check_new_setup(priv_dev)) + if (cdns3_check_new_setup(priv_dev)) { + spin_unlock_irqrestore(&priv_dev->lock, flags); return -ECONNRESET; + } /* send STATUS stage. Should be called only for SET_CONFIGURATION */ if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) { - spin_lock_irqsave(&priv_dev->lock, flags); cdns3_select_ep(priv_dev, 0x00); erdy_sent = !priv_dev->hw_configured_flag; @@ -737,7 +740,6 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep, return 0; } - spin_lock_irqsave(&priv_dev->lock, flags); if (!list_empty(&priv_ep->pending_req_list)) { dev_err(priv_dev->dev, "can't handle multiple requests for ep0\n"); diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h index 8d121e207fd8..755c56582257 100644 --- a/drivers/usb/cdns3/trace.h +++ b/drivers/usb/cdns3/trace.h @@ -156,7 +156,7 @@ DECLARE_EVENT_CLASS(cdns3_log_ep0_irq, __dynamic_array(char, str, CDNS3_MSG_MAX) ), TP_fast_assign( - __entry->ep_dir = priv_dev->ep0_data_dir; + __entry->ep_dir = priv_dev->selected_ep; __entry->ep_sts = ep_sts; ), TP_printk("%s", cdns3_decode_ep0_irq(__get_str(str), diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index f67088bb8218..d5187b50fc82 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1689,6 +1689,8 @@ static int acm_pre_reset(struct usb_interface *intf) static const struct usb_device_id acm_ids[] = { /* quirky and broken devices */ + { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. 
(formerly SMSC) */ + .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */ { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */ .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 20dccf34182d..870df71d1827 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -218,11 +218,12 @@ static const struct usb_device_id usb_quirk_list[] = { /* Logitech HD Webcam C270 */ { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, - /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ + /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT }, /* Logitech ConferenceCam CC3000e */ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 38fc46b0c026..1def9000f936 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -4920,12 +4920,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg) epnum, 0); } - ret = usb_add_gadget_udc(dev, &hsotg->gadget); - if (ret) { - dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, - hsotg->ctrl_req); - return ret; - } dwc2_hsotg_dump(hsotg); return 0; diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index e571c8ae65ec..c347d93eae64 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -575,6 +575,17 @@ static int dwc2_driver_probe(struct platform_device *dev) if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) dwc2_lowlevel_hw_disable(hsotg); +#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ + IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) + /* Postponed adding a new gadget to the udc class driver list */ + if (hsotg->gadget_enabled) { + retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget); + if (retval) { + dwc2_hsotg_remove(hsotg); + goto error_init; + } + } +#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */ return 0; error_init: diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index 48b68b6f0dc8..90bb022737da 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -162,12 +162,6 @@ static const struct dwc3_exynos_driverdata exynos5250_drvdata = { .suspend_clk_idx = -1, }; -static const struct dwc3_exynos_driverdata exynos5420_drvdata = { - .clk_names = { "usbdrd30", "usbdrd30_susp_clk"}, - .num_clks = 2, - .suspend_clk_idx = 1, -}; - static const struct dwc3_exynos_driverdata exynos5433_drvdata = { .clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" }, .num_clks = 4, @@ -184,9 +178,6 @@ static const struct of_device_id exynos_dwc3_match[] = { { .compatible = "samsung,exynos5250-dwusb3", .data = &exynos5250_drvdata, - }, { - .compatible = "samsung,exynos5420-dwusb3", - .data = &exynos5420_drvdata, }, { .compatible = "samsung,exynos5433-dwusb3", .data = &exynos5433_drvdata, diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index b67372737dc9..96c05b121fac 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -206,8 +206,10 @@ static void dwc3_pci_resume_work(struct work_struct 
*work) int ret; ret = pm_runtime_get_sync(&dwc3->dev); - if (ret) + if (ret) { + pm_runtime_put_sync_autosuspend(&dwc3->dev); return; + } pm_runtime_mark_last_busy(&dwc3->dev); pm_runtime_put_sync_autosuspend(&dwc3->dev); diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c index ea0d531c63e2..775cf70cfb3e 100644 --- a/drivers/usb/early/ehci-dbgp.c +++ b/drivers/usb/early/ehci-dbgp.c @@ -1058,7 +1058,8 @@ static int __init kgdbdbgp_parse_config(char *str) kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10); } kgdb_register_io_module(&kgdbdbgp_io_ops); - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1; + if (early_dbgp_console.index != -1) + kgdbdbgp_io_ops.cons = &early_dbgp_console; return 0; } diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index 69289717d856..2384b55f9d3e 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -2313,7 +2313,8 @@ static int mv_udc_probe(struct platform_device *pdev) return 0; err_create_workqueue: - destroy_workqueue(udc->qwork); + if (udc->qwork) + destroy_workqueue(udc->qwork); err_destroy_dma: dma_pool_destroy(udc->dtd_pool); err_free_dma: diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c index a4e9abcbdc4f..1a9b7572e17f 100644 --- a/drivers/usb/host/ehci-exynos.c +++ b/drivers/usb/host/ehci-exynos.c @@ -203,9 +203,8 @@ static int exynos_ehci_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(res); irq = platform_get_irq(pdev, 0); - if (!irq) { - dev_err(&pdev->dev, "Failed to get IRQ\n"); - err = -ENODEV; + if (irq < 0) { + err = irq; goto fail_io; } diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 3c3820ad9092..af3c1b9b38b2 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -216,6 +216,13 @@ static int ehci_pci_setup(struct usb_hcd *hcd) ehci_info(ehci, "applying MosChip frame-index workaround\n"); ehci->frame_index_bug = 1; break; + case PCI_VENDOR_ID_HUAWEI: + /* Synopsys HC bug */ + if (pdev->device == 0xa239) { + ehci_info(ehci, "applying Synopsys HC workaround\n"); + ehci->has_synopsys_hc_bug = 1; + } + break; } /* optional debug port, normally in the first BAR */ diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c index cff965240327..b91d50da6127 100644 --- a/drivers/usb/host/ohci-sm501.c +++ b/drivers/usb/host/ohci-sm501.c @@ -191,6 +191,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev) struct resource *mem; usb_remove_hcd(hcd); + iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index bfbdb3ceed29..4311d4c9b68d 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -587,6 +587,9 @@ static int xhci_mtk_remove(struct platform_device *dev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct usb_hcd *shared_hcd = xhci->shared_hcd; + pm_runtime_put_noidle(&dev->dev); + pm_runtime_disable(&dev->dev); + usb_remove_hcd(shared_hcd); xhci->shared_hcd = NULL; device_init_wakeup(&dev->dev, false); @@ -597,8 +600,6 @@ static int xhci_mtk_remove(struct platform_device *dev) xhci_mtk_sch_exit(mtk); xhci_mtk_clks_disable(mtk); xhci_mtk_ldos_disable(mtk); - pm_runtime_put_sync(&dev->dev); - pm_runtime_disable(&dev->dev); return 0; } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 
bee5deccc83d..ed468eed299c 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1430,6 +1430,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, xhci->devs[slot_id]->out_ctx, ep_index); ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); + ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */ ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); @@ -4390,6 +4391,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, int hird, exit_latency; int ret; + if (xhci->quirks & XHCI_HW_LPM_DISABLE) + return -EPERM; + if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || !udev->lpm_capable) return -EPERM; @@ -4412,7 +4416,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", enable ? "enable" : "disable", port_num + 1); - if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) { + if (enable) { /* Host supports BESL timeout instead of HIRD */ if (udev->usb2_hw_lpm_besl_capable) { /* if device doesn't have a preferred BESL value use a @@ -4471,6 +4475,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, mutex_lock(hcd->bandwidth_mutex); xhci_change_max_exit_latency(xhci, udev, 0); mutex_unlock(hcd->bandwidth_mutex); + readl_poll_timeout(ports[port_num]->addr, pm_val, + (pm_val & PORT_PLS_MASK) == XDEV_U0, + 100, 10000); return 0; } } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 2c6c4f8d1ee1..c295e8a7f5ae 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -716,7 +716,7 @@ struct xhci_ep_ctx { * 4 - TRB error * 5-7 - reserved */ -#define EP_STATE_MASK (0xf) +#define EP_STATE_MASK (0x7) #define EP_STATE_DISABLED 0 #define EP_STATE_RUNNING 1 #define EP_STATE_HALTED 2 diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 98ada1a3425c..bae88893ee8e 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -2873,6 +2873,7 @@ static void usbtest_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); dev_dbg(&intf->dev, "disconnect\n"); + kfree(dev->buf); kfree(dev); } diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c index cffe2aced488..03a333797382 100644 --- a/drivers/usb/phy/phy-tegra-usb.c +++ b/drivers/usb/phy/phy-tegra-usb.c @@ -1199,11 +1199,7 @@ static int tegra_usb_phy_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tegra_phy); - err = usb_add_phy_dev(&tegra_phy->u_phy); - if (err) - return err; - - return 0; + return usb_add_phy_dev(&tegra_phy->u_phy); } static int tegra_usb_phy_remove(struct platform_device *pdev) diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 01c6a48c41bc..ac9a81ae8216 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -803,7 +803,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map) return info->dma_map_ctrl(chan->device->dev, pkt, map); } -static void usbhsf_dma_complete(void *arg); +static void usbhsf_dma_complete(void *arg, + const struct dmaengine_result *result); static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt) { struct usbhs_pipe *pipe = pkt->pipe; @@ -813,6 +814,7 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt) struct dma_chan *chan; struct device *dev = usbhs_priv_to_dev(priv); enum dma_transfer_direction dir; + dma_cookie_t cookie; fifo = usbhs_pipe_to_fifo(pipe); if (!fifo) @@ -827,11 +829,11 @@ static void 
usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt) if (!desc) return; - desc->callback = usbhsf_dma_complete; - desc->callback_param = pipe; + desc->callback_result = usbhsf_dma_complete; + desc->callback_param = pkt; - pkt->cookie = dmaengine_submit(desc); - if (pkt->cookie < 0) { + cookie = dmaengine_submit(desc); + if (cookie < 0) { dev_err(dev, "Failed to submit dma descriptor\n"); return; } @@ -1152,12 +1154,10 @@ static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt, struct dma_chan *chan, int dtln) { struct usbhs_pipe *pipe = pkt->pipe; - struct dma_tx_state state; size_t received_size; int maxp = usbhs_pipe_get_maxpacket(pipe); - dmaengine_tx_status(chan, pkt->cookie, &state); - received_size = pkt->length - state.residue; + received_size = pkt->length - pkt->dma_result->residue; if (dtln) { received_size -= USBHS_USB_DMAC_XFER_SIZE; @@ -1363,13 +1363,16 @@ static int usbhsf_irq_ready(struct usbhs_priv *priv, return 0; } -static void usbhsf_dma_complete(void *arg) +static void usbhsf_dma_complete(void *arg, + const struct dmaengine_result *result) { - struct usbhs_pipe *pipe = arg; + struct usbhs_pkt *pkt = arg; + struct usbhs_pipe *pipe = pkt->pipe; struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); struct device *dev = usbhs_priv_to_dev(priv); int ret; + pkt->dma_result = result; ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE); if (ret < 0) dev_err(dev, "dma_complete run_error %d : %d\n", diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h index 7d3700bf41d9..039a2b993157 100644 --- a/drivers/usb/renesas_usbhs/fifo.h +++ b/drivers/usb/renesas_usbhs/fifo.h @@ -50,7 +50,7 @@ struct usbhs_pkt { struct usbhs_pkt *pkt); struct work_struct work; dma_addr_t dma; - dma_cookie_t cookie; + const struct dmaengine_result *dma_result; void *buf; int length; int trans; diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index 962bc69a6a59..70ddc9d6d49e 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -148,7 +148,8 @@ pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_mux_state *state) msg[0] = PMC_USB_DP_HPD; msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; - msg[1] = PMC_USB_DP_HPD_IRQ; + if (data->status & DP_STATUS_IRQ_HPD) + msg[1] = PMC_USB_DP_HPD_IRQ; if (data->status & DP_STATUS_HPD_STATE) msg[1] |= PMC_USB_DP_HPD_LVL; @@ -161,6 +162,7 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state) { struct typec_displayport_data *data = state->data; struct altmode_req req = { }; + int ret; if (data->status & DP_STATUS_IRQ_HPD) return pmc_usb_mux_dp_hpd(port, state); @@ -181,7 +183,14 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state) if (data->status & DP_STATUS_HPD_STATE) req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH; - return pmc_usb_command(port, (void *)&req, sizeof(req)); + ret = pmc_usb_command(port, (void *)&req, sizeof(req)); + if (ret) + return ret; + + if (data->status & DP_STATUS_HPD_STATE) + return pmc_usb_mux_dp_hpd(port, state); + + return 0; } static int diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c index 017389021b96..b56a0880a044 100644 --- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c +++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c @@ -179,26 +179,6 @@ static irqreturn_t rt1711h_irq(int irq, void *dev_id) return tcpci_irq(chip->tcpci); } -static int rt1711h_init_alert(struct rt1711h_chip *chip, - struct i2c_client *client) -{ - int ret; - - /* Disable 
chip interrupts before requesting irq */ - ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0); - if (ret < 0) - return ret; - - ret = devm_request_threaded_irq(chip->dev, client->irq, NULL, - rt1711h_irq, - IRQF_ONESHOT | IRQF_TRIGGER_LOW, - dev_name(chip->dev), chip); - if (ret < 0) - return ret; - enable_irq_wake(client->irq); - return 0; -} - static int rt1711h_sw_reset(struct rt1711h_chip *chip) { int ret; @@ -260,7 +240,8 @@ static int rt1711h_probe(struct i2c_client *client, if (ret < 0) return ret; - ret = rt1711h_init_alert(chip, client); + /* Disable chip interrupts before requesting irq */ + ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0); if (ret < 0) return ret; @@ -271,6 +252,14 @@ static int rt1711h_probe(struct i2c_client *client, if (IS_ERR_OR_NULL(chip->tcpci)) return PTR_ERR(chip->tcpci); + ret = devm_request_threaded_irq(chip->dev, client->irq, NULL, + rt1711h_irq, + IRQF_ONESHOT | IRQF_TRIGGER_LOW, + dev_name(chip->dev), chip); + if (ret < 0) + return ret; + enable_irq_wake(client->irq); + return 0; } diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index ff6562f602e0..de211ef3738c 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -63,7 +63,7 @@ static void vdpa_release_dev(struct device *d) * @config: the bus operations that is supported by this device * @size: size of the parent structure that contains private data * - * Drvier should use vdap_alloc_device() wrapper macro instead of + * Driver should use vdpa_alloc_device() wrapper macro instead of * using this directly. * * Returns an error when parent/config/dma_dev is not set or fail to get diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 7c0779018b1b..f634c81998bb 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -521,10 +521,14 @@ static void vfio_pci_release(void *device_data) vfio_pci_vf_token_user_add(vdev, -1); vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_disable(vdev); - if (vdev->err_trigger) + if (vdev->err_trigger) { eventfd_ctx_put(vdev->err_trigger); - if (vdev->req_trigger) + vdev->err_trigger = NULL; + } + if (vdev->req_trigger) { eventfd_ctx_put(vdev->req_trigger); + vdev->req_trigger = NULL; + } } mutex_unlock(&vdev->reflck->lock); diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 8746c943247a..d98843feddce 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -398,9 +398,15 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write) /* Caller should hold memory_lock semaphore */ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev) { + struct pci_dev *pdev = vdev->pdev; u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); - return cmd & PCI_COMMAND_MEMORY; + /* + * SR-IOV VF memory enable is handled by the MSE bit in the + * PF SR-IOV capability, there's therefore no need to trigger + * faults based on the virtual value. + */ + return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY); } /* @@ -1728,6 +1734,15 @@ int vfio_config_init(struct vfio_pci_device *vdev) vconfig[PCI_INTERRUPT_PIN]); vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ + + /* + * VFs do no implement the memory enable bit of the COMMAND + * register therefore we'll not have it set in our initial + * copy of config space after pci_enable_device(). For + * consistency with PFs, set the virtual enable bit here. 
+ */ + *(__le16 *)&vconfig[PCI_COMMAND] |= + cpu_to_le16(PCI_COMMAND_MEMORY); } if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index 0466921f4772..a09dedc79f68 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c @@ -263,9 +263,62 @@ static int vhost_test_set_features(struct vhost_test *n, u64 features) return 0; } +static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd) +{ + static void *backend; + + const bool enable = fd != -1; + struct vhost_virtqueue *vq; + int r; + + mutex_lock(&n->dev.mutex); + r = vhost_dev_check_owner(&n->dev); + if (r) + goto err; + + if (index >= VHOST_TEST_VQ_MAX) { + r = -ENOBUFS; + goto err; + } + vq = &n->vqs[index]; + mutex_lock(&vq->mutex); + + /* Verify that ring has been setup correctly. */ + if (!vhost_vq_access_ok(vq)) { + r = -EFAULT; + goto err_vq; + } + if (!enable) { + vhost_poll_stop(&vq->poll); + backend = vhost_vq_get_backend(vq); + vhost_vq_set_backend(vq, NULL); + } else { + vhost_vq_set_backend(vq, backend); + r = vhost_vq_init_access(vq); + if (r == 0) + r = vhost_poll_start(&vq->poll, vq->kick); + } + + mutex_unlock(&vq->mutex); + + if (enable) { + vhost_test_flush_vq(n, index); + } + + mutex_unlock(&n->dev.mutex); + return 0; + +err_vq: + mutex_unlock(&vq->mutex); +err: + mutex_unlock(&n->dev.mutex); + return r; +} + static long vhost_test_ioctl(struct file *f, unsigned int ioctl, unsigned long arg) { + struct vhost_vring_file backend; struct vhost_test *n = f->private_data; void __user *argp = (void __user *)arg; u64 __user *featurep = argp; @@ -277,6 +330,10 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl, if (copy_from_user(&test, argp, sizeof test)) return -EFAULT; return vhost_test_run(n, test); + case VHOST_TEST_SET_BACKEND: + if (copy_from_user(&backend, argp, sizeof backend)) + return -EFAULT; + return vhost_test_set_backend(n, backend.index, backend.fd); case VHOST_GET_FEATURES: features = VHOST_FEATURES; if (copy_to_user(featurep, &features, sizeof features)) diff --git a/drivers/vhost/test.h b/drivers/vhost/test.h index 7dd265bfdf81..822bc4bee03a 100644 --- a/drivers/vhost/test.h +++ b/drivers/vhost/test.h @@ -4,5 +4,6 @@ /* Start a given test on the virtio null device. 0 stops all tests. 
*/ #define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int) +#define VHOST_TEST_SET_BACKEND _IOW(VHOST_VIRTIO, 0x32, int) #endif diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 7580e34f76c1..a54b60d6623f 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -818,7 +818,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma) struct vdpa_device *vdpa = v->vdpa; const struct vdpa_config_ops *ops = vdpa->config; struct vdpa_notification_area notify; - int index = vma->vm_pgoff; + unsigned long index = vma->vm_pgoff; if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c index e8ab583e5098..113116d3585c 100644 --- a/drivers/video/backlight/tosa_lcd.c +++ b/drivers/video/backlight/tosa_lcd.c @@ -107,7 +107,7 @@ static void tosa_lcd_tg_on(struct tosa_lcd_data *data) /* TG LCD GVSS */ tosa_tg_send(spi, TG_PINICTL, 0x0); - if (!data->i2c) { + if (IS_ERR_OR_NULL(data->i2c)) { /* * after the pannel is powered up the first time, * we can access the i2c bus so probe for the DAC @@ -119,7 +119,7 @@ static void tosa_lcd_tg_on(struct tosa_lcd_data *data) .addr = DAC_BASE, .platform_data = data->spi, }; - data->i2c = i2c_new_device(adap, &info); + data->i2c = i2c_new_client_device(adap, &info); } } diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 9d28a8e3328f..e2a490c5ae08 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2402,7 +2402,8 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) ops->graphics = 1; if (!blank) { - var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE; + var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE | + FB_ACTIVATE_KD_TEXT; fb_set_var(info, &var); ops->graphics = 0; ops->var = info->var; diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c index f02be0db335e..8d418abdd767 100644 --- a/drivers/video/fbdev/hpfb.c +++ b/drivers/video/fbdev/hpfb.c @@ -402,7 +402,7 @@ int __init hpfb_init(void) if (err) return err; - err = probe_kernel_read(&i, (unsigned char *)INTFBVADDR + DIO_IDOFF, 1); + err = copy_from_kernel_nofault(&i, (unsigned char *)INTFBVADDR + DIO_IDOFF, 1); if (!err && (i == DIO_ID_FBUFFER) && topcat_sid_ok(sid = DIO_SECID(INTFBVADDR))) { if (!request_mem_region(INTFBPADDR, DIO_DEVSIZE, "Internal Topcat")) diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c index bee29aadc646..def14ac0ebe1 100644 --- a/drivers/video/fbdev/uvesafb.c +++ b/drivers/video/fbdev/uvesafb.c @@ -1836,7 +1836,7 @@ static int uvesafb_setup(char *options) else if (!strcmp(this_opt, "noedid")) noedid = true; else if (!strcmp(this_opt, "noblank")) - blank = true; + blank = false; else if (!strncmp(this_opt, "vtotal:", 7)) vram_total = simple_strtoul(this_opt + 7, NULL, 0); else if (!strncmp(this_opt, "vremap:", 7)) diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 50c689f25045..f26f5f64ae82 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -101,6 +101,11 @@ struct virtio_mem { /* The parent resource for all memory added via this device. */ struct resource *parent_resource; + /* + * Copy of "System RAM (virtio_mem)" to be used for + * add_memory_driver_managed(). + */ + const char *resource_name; /* Summary of all memory block states. 
*/ unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT]; @@ -414,8 +419,20 @@ static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id) if (nid == NUMA_NO_NODE) nid = memory_add_physaddr_to_nid(addr); + /* + * When force-unloading the driver and we still have memory added to + * Linux, the resource name has to stay. + */ + if (!vm->resource_name) { + vm->resource_name = kstrdup_const("System RAM (virtio_mem)", + GFP_KERNEL); + if (!vm->resource_name) + return -ENOMEM; + } + dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id); - return add_memory(nid, addr, memory_block_size_bytes()); + return add_memory_driver_managed(nid, addr, memory_block_size_bytes(), + vm->resource_name); } /* @@ -1192,7 +1209,7 @@ static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id, VIRTIO_MEM_MB_STATE_OFFLINE); } - return rc; + return 0; } /* @@ -1890,10 +1907,12 @@ static void virtio_mem_remove(struct virtio_device *vdev) vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] || vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] || vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL] || - vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE]) + vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE]) { dev_warn(&vdev->dev, "device still has system memory added\n"); - else + } else { virtio_mem_delete_resource(vm); + kfree_const(vm->resource_name); + } /* remove all tracking data - no locking needed */ vfree(vm->mb_state); diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h index 3041092e84b3..449680a61569 100644 --- a/drivers/w1/w1_netlink.h +++ b/drivers/w1/w1_netlink.h @@ -73,7 +73,7 @@ struct w1_netlink_msg __u32 res; } mst; } id; - __u8 data[0]; + __u8 data[]; }; /** @@ -122,7 +122,7 @@ struct w1_netlink_cmd __u8 cmd; __u8 res; __u16 len; - __u8 data[0]; + __u8 data[]; }; #ifdef __KERNEL__ diff --git a/fs/afs/cell.c b/fs/afs/cell.c index 005921e3b38d..5b79cdceefa0 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c @@ -154,10 +154,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, return ERR_PTR(-ENOMEM); } + cell->name = kmalloc(namelen + 1, GFP_KERNEL); + if (!cell->name) { + kfree(cell); + return ERR_PTR(-ENOMEM); + } + cell->net = net; cell->name_len = namelen; for (i = 0; i < namelen; i++) cell->name[i] = tolower(name[i]); + cell->name[i] = 0; atomic_set(&cell->usage, 2); INIT_WORK(&cell->manager, afs_manage_cell); @@ -207,6 +214,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, if (ret == -EINVAL) printk(KERN_ERR "kAFS: bad VL server IP address\n"); error: + kfree(cell->name); kfree(cell); _leave(" = %d", ret); return ERR_PTR(ret); @@ -489,6 +497,7 @@ static void afs_cell_destroy(struct rcu_head *rcu) afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers)); afs_put_cell(cell->net, cell->alias_of); key_put(cell->anonymous_key); + kfree(cell->name); kfree(cell); _leave(" [destroyed]"); diff --git a/fs/afs/dir.c b/fs/afs/dir.c index aa1d34141ea3..96757f3abd74 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -648,7 +648,7 @@ static void afs_do_lookup_success(struct afs_operation *op) vp = &op->file[0]; abort_code = vp->scb.status.abort_code; if (abort_code != 0) { - op->abort_code = abort_code; + op->ac.abort_code = abort_code; op->error = afs_abort_to_error(abort_code); } break; @@ -696,10 +696,11 @@ static const struct afs_operation_ops afs_inline_bulk_status_operation = { .success = afs_do_lookup_success, }; -static const struct afs_operation_ops afs_fetch_status_operation = { +static const struct afs_operation_ops 
afs_lookup_fetch_status_operation = { .issue_afs_rpc = afs_fs_fetch_status, .issue_yfs_rpc = yfs_fs_fetch_status, .success = afs_do_lookup_success, + .aborted = afs_check_for_remote_deletion, }; /* @@ -844,7 +845,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, * to FS.FetchStatus for op->file[1]. */ op->fetch_status.which = 1; - op->ops = &afs_fetch_status_operation; + op->ops = &afs_lookup_fetch_status_operation; afs_begin_vnode_operation(op); afs_wait_for_operation(op); } @@ -1236,6 +1237,17 @@ void afs_d_release(struct dentry *dentry) _enter("%pd", dentry); } +void afs_check_for_remote_deletion(struct afs_operation *op) +{ + struct afs_vnode *vnode = op->file[0].vnode; + + switch (op->ac.abort_code) { + case VNOVNODE: + set_bit(AFS_VNODE_DELETED, &vnode->flags); + afs_break_callback(vnode, afs_cb_break_for_deleted); + } +} + /* * Create a new inode for create/mkdir/symlink */ @@ -1268,7 +1280,7 @@ static void afs_vnode_new_inode(struct afs_operation *op) static void afs_create_success(struct afs_operation *op) { _enter("op=%08x", op->debug_id); - afs_check_for_remote_deletion(op, op->file[0].vnode); + op->ctime = op->file[0].scb.status.mtime_client; afs_vnode_commit_status(op, &op->file[0]); afs_update_dentry_version(op, &op->file[0], op->dentry); afs_vnode_new_inode(op); @@ -1302,6 +1314,7 @@ static const struct afs_operation_ops afs_mkdir_operation = { .issue_afs_rpc = afs_fs_make_dir, .issue_yfs_rpc = yfs_fs_make_dir, .success = afs_create_success, + .aborted = afs_check_for_remote_deletion, .edit_dir = afs_create_edit_dir, .put = afs_create_put, }; @@ -1325,6 +1338,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) afs_op_set_vnode(op, 0, dvnode); op->file[0].dv_delta = 1; + op->file[0].update_ctime = true; op->dentry = dentry; op->create.mode = S_IFDIR | mode; op->create.reason = afs_edit_dir_for_mkdir; @@ -1350,7 +1364,7 @@ static void afs_dir_remove_subdir(struct dentry *dentry) static void afs_rmdir_success(struct afs_operation *op) { _enter("op=%08x", op->debug_id); - afs_check_for_remote_deletion(op, op->file[0].vnode); + op->ctime = op->file[0].scb.status.mtime_client; afs_vnode_commit_status(op, &op->file[0]); afs_update_dentry_version(op, &op->file[0], op->dentry); } @@ -1382,6 +1396,7 @@ static const struct afs_operation_ops afs_rmdir_operation = { .issue_afs_rpc = afs_fs_remove_dir, .issue_yfs_rpc = yfs_fs_remove_dir, .success = afs_rmdir_success, + .aborted = afs_check_for_remote_deletion, .edit_dir = afs_rmdir_edit_dir, .put = afs_rmdir_put, }; @@ -1404,6 +1419,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) afs_op_set_vnode(op, 0, dvnode); op->file[0].dv_delta = 1; + op->file[0].update_ctime = true; op->dentry = dentry; op->ops = &afs_rmdir_operation; @@ -1479,7 +1495,8 @@ static void afs_dir_remove_link(struct afs_operation *op) static void afs_unlink_success(struct afs_operation *op) { _enter("op=%08x", op->debug_id); - afs_check_for_remote_deletion(op, op->file[0].vnode); + op->ctime = op->file[0].scb.status.mtime_client; + afs_check_dir_conflict(op, &op->file[0]); afs_vnode_commit_status(op, &op->file[0]); afs_vnode_commit_status(op, &op->file[1]); afs_update_dentry_version(op, &op->file[0], op->dentry); @@ -1511,6 +1528,7 @@ static const struct afs_operation_ops afs_unlink_operation = { .issue_afs_rpc = afs_fs_remove_file, .issue_yfs_rpc = yfs_fs_remove_file, .success = afs_unlink_success, + .aborted = afs_check_for_remote_deletion, .edit_dir = afs_unlink_edit_dir, .put = 
afs_unlink_put, }; @@ -1537,6 +1555,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) afs_op_set_vnode(op, 0, dvnode); op->file[0].dv_delta = 1; + op->file[0].update_ctime = true; /* Try to make sure we have a callback promise on the victim. */ ret = afs_validate(vnode, op->key); @@ -1561,9 +1580,25 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) spin_unlock(&dentry->d_lock); op->file[1].vnode = vnode; + op->file[1].update_ctime = true; + op->file[1].op_unlinked = true; op->dentry = dentry; op->ops = &afs_unlink_operation; - return afs_do_sync_operation(op); + afs_begin_vnode_operation(op); + afs_wait_for_operation(op); + + /* If there was a conflict with a third party, check the status of the + * unlinked vnode. + */ + if (op->error == 0 && (op->flags & AFS_OPERATION_DIR_CONFLICT)) { + op->file[1].update_ctime = false; + op->fetch_status.which = 1; + op->ops = &afs_fetch_status_operation; + afs_begin_vnode_operation(op); + afs_wait_for_operation(op); + } + + return afs_put_operation(op); error: return afs_put_operation(op); @@ -1573,6 +1608,7 @@ static const struct afs_operation_ops afs_create_operation = { .issue_afs_rpc = afs_fs_create_file, .issue_yfs_rpc = yfs_fs_create_file, .success = afs_create_success, + .aborted = afs_check_for_remote_deletion, .edit_dir = afs_create_edit_dir, .put = afs_create_put, }; @@ -1601,6 +1637,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode, afs_op_set_vnode(op, 0, dvnode); op->file[0].dv_delta = 1; + op->file[0].update_ctime = true; op->dentry = dentry; op->create.mode = S_IFREG | mode; @@ -1620,6 +1657,7 @@ static void afs_link_success(struct afs_operation *op) struct afs_vnode_param *vp = &op->file[1]; _enter("op=%08x", op->debug_id); + op->ctime = dvp->scb.status.mtime_client; afs_vnode_commit_status(op, dvp); afs_vnode_commit_status(op, vp); afs_update_dentry_version(op, dvp, op->dentry); @@ -1640,6 +1678,7 @@ static const struct afs_operation_ops afs_link_operation = { .issue_afs_rpc = afs_fs_link, .issue_yfs_rpc = yfs_fs_link, .success = afs_link_success, + .aborted = afs_check_for_remote_deletion, .edit_dir = afs_create_edit_dir, .put = afs_link_put, }; @@ -1672,6 +1711,8 @@ static int afs_link(struct dentry *from, struct inode *dir, afs_op_set_vnode(op, 0, dvnode); afs_op_set_vnode(op, 1, vnode); op->file[0].dv_delta = 1; + op->file[0].update_ctime = true; + op->file[1].update_ctime = true; op->dentry = dentry; op->dentry_2 = from; @@ -1689,6 +1730,7 @@ static const struct afs_operation_ops afs_symlink_operation = { .issue_afs_rpc = afs_fs_symlink, .issue_yfs_rpc = yfs_fs_symlink, .success = afs_create_success, + .aborted = afs_check_for_remote_deletion, .edit_dir = afs_create_edit_dir, .put = afs_create_put, }; @@ -1740,9 +1782,13 @@ static void afs_rename_success(struct afs_operation *op) { _enter("op=%08x", op->debug_id); + op->ctime = op->file[0].scb.status.mtime_client; + afs_check_dir_conflict(op, &op->file[1]); afs_vnode_commit_status(op, &op->file[0]); - if (op->file[1].vnode != op->file[0].vnode) + if (op->file[1].vnode != op->file[0].vnode) { + op->ctime = op->file[1].scb.status.mtime_client; afs_vnode_commit_status(op, &op->file[1]); + } } static void afs_rename_edit_dir(struct afs_operation *op) @@ -1860,6 +1906,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */ op->file[0].dv_delta = 1; op->file[1].dv_delta = 1; + op->file[0].update_ctime = true; + 
op->file[1].update_ctime = true; op->dentry = old_dentry; op->dentry_2 = new_dentry; diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c index b14e3d9a25e2..04f75a44f243 100644 --- a/fs/afs/dir_silly.c +++ b/fs/afs/dir_silly.c @@ -16,6 +16,7 @@ static void afs_silly_rename_success(struct afs_operation *op) { _enter("op=%08x", op->debug_id); + afs_check_dir_conflict(op, &op->file[0]); afs_vnode_commit_status(op, &op->file[0]); } @@ -69,6 +70,11 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode return PTR_ERR(op); afs_op_set_vnode(op, 0, dvnode); + afs_op_set_vnode(op, 1, dvnode); + op->file[0].dv_delta = 1; + op->file[1].dv_delta = 1; + op->file[0].update_ctime = true; + op->file[1].update_ctime = true; op->dentry = old; op->dentry_2 = new; @@ -129,6 +135,7 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode, switch (ret) { case 0: /* The rename succeeded. */ + set_bit(AFS_VNODE_SILLY_DELETED, &vnode->flags); d_move(dentry, sdentry); break; case -ERESTARTSYS: @@ -148,19 +155,11 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode, static void afs_silly_unlink_success(struct afs_operation *op) { - struct afs_vnode *vnode = op->file[1].vnode; - _enter("op=%08x", op->debug_id); - afs_check_for_remote_deletion(op, op->file[0].vnode); + afs_check_dir_conflict(op, &op->file[0]); afs_vnode_commit_status(op, &op->file[0]); afs_vnode_commit_status(op, &op->file[1]); afs_update_dentry_version(op, &op->file[0], op->dentry); - - drop_nlink(&vnode->vfs_inode); - if (vnode->vfs_inode.i_nlink == 0) { - set_bit(AFS_VNODE_DELETED, &vnode->flags); - clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); - } } static void afs_silly_unlink_edit_dir(struct afs_operation *op) @@ -181,6 +180,7 @@ static const struct afs_operation_ops afs_silly_unlink_operation = { .issue_afs_rpc = afs_fs_remove_file, .issue_yfs_rpc = yfs_fs_remove_file, .success = afs_silly_unlink_success, + .aborted = afs_check_for_remote_deletion, .edit_dir = afs_silly_unlink_edit_dir, }; @@ -200,12 +200,30 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode afs_op_set_vnode(op, 0, dvnode); afs_op_set_vnode(op, 1, vnode); + op->file[0].dv_delta = 1; + op->file[0].update_ctime = true; + op->file[1].op_unlinked = true; + op->file[1].update_ctime = true; op->dentry = dentry; op->ops = &afs_silly_unlink_operation; trace_afs_silly_rename(vnode, true); - return afs_do_sync_operation(op); + afs_begin_vnode_operation(op); + afs_wait_for_operation(op); + + /* If there was a conflict with a third party, check the status of the + * unlinked vnode. 
+ */ + if (op->error == 0 && (op->flags & AFS_OPERATION_DIR_CONFLICT)) { + op->file[1].update_ctime = false; + op->fetch_status.which = 1; + op->ops = &afs_fetch_status_operation; + afs_begin_vnode_operation(op); + afs_wait_for_operation(op); + } + + return afs_put_operation(op); } /* diff --git a/fs/afs/file.c b/fs/afs/file.c index 506c47471b42..6f6ed1605cfe 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -225,7 +225,6 @@ static void afs_fetch_data_success(struct afs_operation *op) struct afs_vnode *vnode = op->file[0].vnode; _enter("op=%08x", op->debug_id); - afs_check_for_remote_deletion(op, vnode); afs_vnode_commit_status(op, &op->file[0]); afs_stat_v(vnode, n_fetches); atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes); @@ -240,6 +239,7 @@ static const struct afs_operation_ops afs_fetch_data_operation = { .issue_afs_rpc = afs_fs_fetch_data, .issue_yfs_rpc = yfs_fs_fetch_data, .success = afs_fetch_data_success, + .aborted = afs_check_for_remote_deletion, .put = afs_fetch_data_put, }; diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 71eea2a908c7..ffb8575345ca 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -175,10 +175,7 @@ static void afs_kill_lockers_enoent(struct afs_vnode *vnode) static void afs_lock_success(struct afs_operation *op) { - struct afs_vnode *vnode = op->file[0].vnode; - _enter("op=%08x", op->debug_id); - afs_check_for_remote_deletion(op, vnode); afs_vnode_commit_status(op, &op->file[0]); } @@ -186,6 +183,7 @@ static const struct afs_operation_ops afs_set_lock_operation = { .issue_afs_rpc = afs_fs_set_lock, .issue_yfs_rpc = yfs_fs_set_lock, .success = afs_lock_success, + .aborted = afs_check_for_remote_deletion, }; /* diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c index 2d2dff5688a4..c264839b2fd0 100644 --- a/fs/afs/fs_operation.c +++ b/fs/afs/fs_operation.c @@ -187,9 +187,17 @@ void afs_wait_for_operation(struct afs_operation *op) op->error = afs_wait_for_call_to_complete(op->call, &op->ac); } - if (op->error == 0) { + switch (op->error) { + case 0: _debug("success"); op->ops->success(op); + break; + case -ECONNABORTED: + if (op->ops->aborted) + op->ops->aborted(op); + break; + default: + break; } afs_end_vnode_operation(op); diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c index b34f74b0f319..5d9ef517cf81 100644 --- a/fs/afs/fs_probe.c +++ b/fs/afs/fs_probe.c @@ -314,7 +314,7 @@ void afs_fs_probe_timer(struct timer_list *timer) { struct afs_net *net = container_of(timer, struct afs_net, fs_probe_timer); - if (!queue_work(afs_wq, &net->fs_prober)) + if (!net->live || !queue_work(afs_wq, &net->fs_prober)) afs_dec_servers_outstanding(net); } @@ -458,3 +458,12 @@ int afs_wait_for_one_fs_probe(struct afs_server *server, bool is_intr) return -ETIME; return -EDESTADDRREQ; } + +/* + * Clean up the probing when the namespace is killed off. 
+ */ +void afs_fs_probe_cleanup(struct afs_net *net) +{ + if (del_timer_sync(&net->fs_probe_timer)) + afs_dec_servers_outstanding(net); +} diff --git a/fs/afs/inode.c b/fs/afs/inode.c index cd0a0060950b..1d13d2e882ad 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -165,9 +165,11 @@ static void afs_apply_status(struct afs_operation *op, { struct afs_file_status *status = &vp->scb.status; struct afs_vnode *vnode = vp->vnode; + struct inode *inode = &vnode->vfs_inode; struct timespec64 t; umode_t mode; bool data_changed = false; + bool change_size = vp->set_size; _enter("{%llx:%llu.%u} %s", vp->fid.vid, vp->fid.vnode, vp->fid.unique, @@ -186,25 +188,25 @@ static void afs_apply_status(struct afs_operation *op, } if (status->nlink != vnode->status.nlink) - set_nlink(&vnode->vfs_inode, status->nlink); + set_nlink(inode, status->nlink); if (status->owner != vnode->status.owner) - vnode->vfs_inode.i_uid = make_kuid(&init_user_ns, status->owner); + inode->i_uid = make_kuid(&init_user_ns, status->owner); if (status->group != vnode->status.group) - vnode->vfs_inode.i_gid = make_kgid(&init_user_ns, status->group); + inode->i_gid = make_kgid(&init_user_ns, status->group); if (status->mode != vnode->status.mode) { - mode = vnode->vfs_inode.i_mode; + mode = inode->i_mode; mode &= ~S_IALLUGO; mode |= status->mode; - WRITE_ONCE(vnode->vfs_inode.i_mode, mode); + WRITE_ONCE(inode->i_mode, mode); } t = status->mtime_client; - vnode->vfs_inode.i_ctime = t; - vnode->vfs_inode.i_mtime = t; - vnode->vfs_inode.i_atime = t; + inode->i_mtime = t; + if (vp->update_ctime) + inode->i_ctime = op->ctime; if (vnode->status.data_version != status->data_version) data_changed = true; @@ -226,6 +228,7 @@ static void afs_apply_status(struct afs_operation *op, } else { set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags); } + change_size = true; } else if (vnode->status.type == AFS_FTYPE_DIR) { /* Expected directory change is handled elsewhere so * that we can locally edit the directory and save on a @@ -233,11 +236,22 @@ static void afs_apply_status(struct afs_operation *op, */ if (test_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) data_changed = false; + change_size = true; } if (data_changed) { - inode_set_iversion_raw(&vnode->vfs_inode, status->data_version); - afs_set_i_size(vnode, status->size); + inode_set_iversion_raw(inode, status->data_version); + + /* Only update the size if the data version jumped. If the + * file is being modified locally, then we might have our own + * idea of what the size should be that's not the same as + * what's on the server. + */ + if (change_size) { + afs_set_i_size(vnode, status->size); + inode->i_ctime = t; + inode->i_atime = t; + } } } @@ -267,32 +281,39 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v _enter(""); - ASSERTCMP(op->error, ==, 0); - write_seqlock(&vnode->cb_lock); if (vp->scb.have_error) { + /* A YFS server will return this from RemoveFile2 and AFS and + * YFS will return this from InlineBulkStatus. 
+ */ if (vp->scb.status.abort_code == VNOVNODE) { set_bit(AFS_VNODE_DELETED, &vnode->flags); clear_nlink(&vnode->vfs_inode); __afs_break_callback(vnode, afs_cb_break_for_deleted); + op->flags &= ~AFS_OPERATION_DIR_CONFLICT; } - } else { - if (vp->scb.have_status) - afs_apply_status(op, vp); + } else if (vp->scb.have_status) { + afs_apply_status(op, vp); if (vp->scb.have_cb) afs_apply_callback(op, vp); + } else if (vp->op_unlinked && !(op->flags & AFS_OPERATION_DIR_CONFLICT)) { + drop_nlink(&vnode->vfs_inode); + if (vnode->vfs_inode.i_nlink == 0) { + set_bit(AFS_VNODE_DELETED, &vnode->flags); + __afs_break_callback(vnode, afs_cb_break_for_deleted); + } } write_sequnlock(&vnode->cb_lock); - if (op->error == 0 && vp->scb.have_status) + if (vp->scb.have_status) afs_cache_permit(vnode, op->key, vp->cb_break_before, &vp->scb); } static void afs_fetch_status_success(struct afs_operation *op) { - struct afs_vnode_param *vp = &op->file[0]; + struct afs_vnode_param *vp = &op->file[op->fetch_status.which]; struct afs_vnode *vnode = vp->vnode; int ret; @@ -306,10 +327,11 @@ static void afs_fetch_status_success(struct afs_operation *op) } } -static const struct afs_operation_ops afs_fetch_status_operation = { +const struct afs_operation_ops afs_fetch_status_operation = { .issue_afs_rpc = afs_fs_fetch_status, .issue_yfs_rpc = yfs_fs_fetch_status, .success = afs_fetch_status_success, + .aborted = afs_check_for_remote_deletion, }; /* @@ -716,6 +738,9 @@ int afs_getattr(const struct path *path, struct kstat *stat, do { read_seqbegin_or_lock(&vnode->cb_lock, &seq); generic_fillattr(inode, stat); + if (test_bit(AFS_VNODE_SILLY_DELETED, &vnode->flags) && + stat->nlink > 0) + stat->nlink -= 1; } while (need_seqretry(&vnode->cb_lock, seq)); done_seqretry(&vnode->cb_lock, seq); @@ -785,7 +810,15 @@ void afs_evict_inode(struct inode *inode) static void afs_setattr_success(struct afs_operation *op) { + struct inode *inode = &op->file[0].vnode->vfs_inode; + afs_vnode_commit_status(op, &op->file[0]); + if (op->setattr.attr->ia_valid & ATTR_SIZE) { + loff_t i_size = inode->i_size, size = op->setattr.attr->ia_size; + if (size > i_size) + pagecache_isize_extended(inode, i_size, size); + truncate_pagecache(inode, size); + } } static const struct afs_operation_ops afs_setattr_operation = { @@ -801,17 +834,31 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr) { struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); + int ret; _enter("{%llx:%llu},{n=%pd},%x", vnode->fid.vid, vnode->fid.vnode, dentry, attr->ia_valid); if (!(attr->ia_valid & (ATTR_SIZE | ATTR_MODE | ATTR_UID | ATTR_GID | - ATTR_MTIME))) { + ATTR_MTIME | ATTR_MTIME_SET | ATTR_TIMES_SET | + ATTR_TOUCH))) { _leave(" = 0 [unsupported]"); return 0; } + if (attr->ia_valid & ATTR_SIZE) { + if (!S_ISREG(vnode->vfs_inode.i_mode)) + return -EISDIR; + + ret = inode_newsize_ok(&vnode->vfs_inode, attr->ia_size); + if (ret) + return ret; + + if (attr->ia_size == i_size_read(&vnode->vfs_inode)) + attr->ia_valid &= ~ATTR_SIZE; + } + /* flush any dirty data outstanding on a regular file */ if (S_ISREG(vnode->vfs_inode.i_mode)) filemap_write_and_wait(vnode->vfs_inode.i_mapping); @@ -825,8 +872,12 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr) afs_op_set_vnode(op, 0, vnode); op->setattr.attr = attr; - if (attr->ia_valid & ATTR_SIZE) + if (attr->ia_valid & ATTR_SIZE) { op->file[0].dv_delta = 1; + op->file[0].set_size = true; + } + op->ctime = attr->ia_ctime; + op->file[0].update_ctime = 1; op->ops = &afs_setattr_operation; 
return afs_do_sync_operation(op); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 0c9806ef2a19..792ac711985e 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -388,7 +388,7 @@ struct afs_cell { struct afs_vlserver_list __rcu *vl_servers; u8 name_len; /* Length of name */ - char name[64 + 1]; /* Cell name, case-flattened and NUL-padded */ + char *name; /* Cell name, case-flattened and NUL-padded */ }; /* @@ -634,6 +634,7 @@ struct afs_vnode { #define AFS_VNODE_AUTOCELL 6 /* set if Vnode is an auto mount point */ #define AFS_VNODE_PSEUDODIR 7 /* set if Vnode is a pseudo directory */ #define AFS_VNODE_NEW_CONTENT 8 /* Set if file has new content (create/trunc-0) */ +#define AFS_VNODE_SILLY_DELETED 9 /* Set if file has been silly-deleted */ struct list_head wb_keys; /* List of keys available for writeback */ struct list_head pending_locks; /* locks waiting to be granted */ @@ -744,8 +745,11 @@ struct afs_vnode_param { afs_dataversion_t dv_before; /* Data version before the call */ unsigned int cb_break_before; /* cb_break + cb_s_break before the call */ u8 dv_delta; /* Expected change in data version */ - bool put_vnode; /* T if we have a ref on the vnode */ - bool need_io_lock; /* T if we need the I/O lock on this */ + bool put_vnode:1; /* T if we have a ref on the vnode */ + bool need_io_lock:1; /* T if we need the I/O lock on this */ + bool update_ctime:1; /* Need to update the ctime */ + bool set_size:1; /* Must update i_size */ + bool op_unlinked:1; /* True if file was unlinked by op */ }; /* @@ -766,9 +770,9 @@ struct afs_operation { struct dentry *dentry; /* Dentry to be altered */ struct dentry *dentry_2; /* Second dentry to be altered */ struct timespec64 mtime; /* Modification time to record */ + struct timespec64 ctime; /* Change time to set */ short nr_files; /* Number of entries in file[], more_files */ short error; - unsigned int abort_code; unsigned int debug_id; unsigned int cb_v_break; /* Volume break counter before op */ @@ -837,6 +841,7 @@ struct afs_operation { #define AFS_OPERATION_LOCK_1 0x0200 /* Set if have io_lock on file[1] */ #define AFS_OPERATION_TRIED_ALL 0x0400 /* Set if we've tried all the fileservers */ #define AFS_OPERATION_RETRY_SERVER 0x0800 /* Set if we should retry the current server */ +#define AFS_OPERATION_DIR_CONFLICT 0x1000 /* Set if we detected a 3rd-party dir change */ }; /* @@ -932,6 +937,7 @@ extern const struct address_space_operations afs_dir_aops; extern const struct dentry_operations afs_fs_dentry_operations; extern void afs_d_release(struct dentry *); +extern void afs_check_for_remote_deletion(struct afs_operation *); /* * dir_edit.c @@ -1059,10 +1065,13 @@ extern int afs_wait_for_fs_probes(struct afs_server_list *, unsigned long); extern void afs_probe_fileserver(struct afs_net *, struct afs_server *); extern void afs_fs_probe_dispatcher(struct work_struct *); extern int afs_wait_for_one_fs_probe(struct afs_server *, bool); +extern void afs_fs_probe_cleanup(struct afs_net *); /* * inode.c */ +extern const struct afs_operation_ops afs_fetch_status_operation; + extern void afs_vnode_commit_status(struct afs_operation *, struct afs_vnode_param *); extern int afs_fetch_status(struct afs_vnode *, struct key *, bool, afs_access_t *); extern int afs_ilookup5_test_by_fid(struct inode *, void *); @@ -1435,7 +1444,6 @@ extern ssize_t afs_listxattr(struct dentry *, char *, size_t); /* * yfsclient.c */ -extern void yfs_fs_fetch_file_status(struct afs_operation *); extern void yfs_fs_fetch_data(struct afs_operation *); extern void 
yfs_fs_create_file(struct afs_operation *); extern void yfs_fs_make_dir(struct afs_operation *); @@ -1481,15 +1489,6 @@ static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode) return &vnode->vfs_inode; } -static inline void afs_check_for_remote_deletion(struct afs_operation *op, - struct afs_vnode *vnode) -{ - if (op->error == -ENOENT) { - set_bit(AFS_VNODE_DELETED, &vnode->flags); - afs_break_callback(vnode, afs_cb_break_for_deleted); - } -} - /* * Note that a dentry got changed. We need to set d_fsdata to the data version * number derived from the result of the operation. It doesn't matter if @@ -1504,6 +1503,18 @@ static inline void afs_update_dentry_version(struct afs_operation *op, (void *)(unsigned long)dir_vp->scb.status.data_version; } +/* + * Check for a conflicting operation on a directory that we just unlinked from. + * If someone managed to sneak a link or an unlink in on the file we just + * unlinked, we won't be able to trust nlink on an AFS file (but not YFS). + */ +static inline void afs_check_dir_conflict(struct afs_operation *op, + struct afs_vnode_param *dvp) +{ + if (dvp->dv_before + dvp->dv_delta != dvp->scb.status.data_version) + op->flags |= AFS_OPERATION_DIR_CONFLICT; +} + static inline int afs_io_error(struct afs_call *call, enum afs_io_error where) { trace_afs_io_error(call->debug_id, -EIO, where); diff --git a/fs/afs/main.c b/fs/afs/main.c index 9c79c91e8005..31b472f7c734 100644 --- a/fs/afs/main.c +++ b/fs/afs/main.c @@ -100,6 +100,7 @@ static int __net_init afs_net_init(struct net *net_ns) timer_setup(&net->fs_timer, afs_servers_timer, 0); INIT_WORK(&net->fs_prober, afs_fs_probe_dispatcher); timer_setup(&net->fs_probe_timer, afs_fs_probe_timer, 0); + atomic_set(&net->servers_outstanding, 1); ret = -ENOMEM; sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL); @@ -130,6 +131,7 @@ static int __net_init afs_net_init(struct net *net_ns) error_open_socket: net->live = false; + afs_fs_probe_cleanup(net); afs_cell_purge(net); afs_purge_servers(net); error_cell_init: @@ -150,6 +152,7 @@ static void __net_exit afs_net_exit(struct net *net_ns) struct afs_net *net = afs_net(net_ns); net->live = false; + afs_fs_probe_cleanup(net); afs_cell_purge(net); afs_purge_servers(net); afs_close_socket(net); diff --git a/fs/afs/misc.c b/fs/afs/misc.c index 52b19e9c1535..5334f1bd2bca 100644 --- a/fs/afs/misc.c +++ b/fs/afs/misc.c @@ -83,6 +83,7 @@ int afs_abort_to_error(u32 abort_code) case UAENOLCK: return -ENOLCK; case UAENOTEMPTY: return -ENOTEMPTY; case UAELOOP: return -ELOOP; + case UAEOVERFLOW: return -EOVERFLOW; case UAENOMEDIUM: return -ENOMEDIUM; case UAEDQUOT: return -EDQUOT; diff --git a/fs/afs/server.c b/fs/afs/server.c index 039e3488511c..e82e452e2612 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -605,11 +605,12 @@ void afs_purge_servers(struct afs_net *net) _enter(""); if (del_timer_sync(&net->fs_timer)) - atomic_dec(&net->servers_outstanding); + afs_dec_servers_outstanding(net); afs_queue_server_manager(net); _debug("wait"); + atomic_dec(&net->servers_outstanding); wait_var_event(&net->servers_outstanding, !atomic_read(&net->servers_outstanding)); _leave(""); diff --git a/fs/afs/write.c b/fs/afs/write.c index 768497f82aee..7437806332d9 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -194,11 +194,11 @@ int afs_write_end(struct file *file, struct address_space *mapping, i_size = i_size_read(&vnode->vfs_inode); if (maybe_i_size > i_size) { - spin_lock(&vnode->wb_lock); + write_seqlock(&vnode->cb_lock); i_size = i_size_read(&vnode->vfs_inode); if 
(maybe_i_size > i_size) i_size_write(&vnode->vfs_inode, maybe_i_size); - spin_unlock(&vnode->wb_lock); + write_sequnlock(&vnode->cb_lock); } if (!PageUptodate(page)) { @@ -393,6 +393,7 @@ static void afs_store_data_success(struct afs_operation *op) { struct afs_vnode *vnode = op->file[0].vnode; + op->ctime = op->file[0].scb.status.mtime_client; afs_vnode_commit_status(op, &op->file[0]); if (op->error == 0) { afs_pages_written_back(vnode, op->store.first, op->store.last); @@ -491,6 +492,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping, unsigned long count, priv; unsigned n, offset, to, f, t; pgoff_t start, first, last; + loff_t i_size, end; int loop, ret; _enter(",%lx", primary_page->index); @@ -591,7 +593,12 @@ static int afs_write_back_from_locked_page(struct address_space *mapping, first = primary_page->index; last = first + count - 1; + end = (loff_t)last * PAGE_SIZE + to; + i_size = i_size_read(&vnode->vfs_inode); + _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to); + if (end > i_size) + to = i_size & ~PAGE_MASK; ret = afs_store_data(mapping, first, last, offset, to); switch (ret) { @@ -844,6 +851,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) vmf->page->index, priv); SetPagePrivate(vmf->page); set_page_private(vmf->page, priv); + file_update_time(file); sb_end_pagefault(inode->i_sb); return VM_FAULT_LOCKED; diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 52d5af5fcd44..8c24fdc899e3 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -329,29 +329,6 @@ static void xdr_decode_YFSFetchVolumeStatus(const __be32 **_bp, *_bp += sizeof(*x) / sizeof(__be32); } -/* - * Deliver a reply that's a status, callback and volsync. - */ -static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call) -{ - struct afs_operation *op = call->op; - const __be32 *bp; - int ret; - - ret = afs_transfer_reply(call); - if (ret < 0) - return ret; - - /* unmarshall the reply once we've received all of it */ - bp = call->buffer; - xdr_decode_YFSFetchStatus(&bp, call, &op->file[0].scb); - xdr_decode_YFSCallBack(&bp, call, &op->file[0].scb); - xdr_decode_YFSVolSync(&bp, &op->volsync); - - _leave(" = 0 [done]"); - return 0; -} - /* * Deliver reply data to operations that just return a file status and a volume * sync record. @@ -374,48 +351,6 @@ static int yfs_deliver_status_and_volsync(struct afs_call *call) return 0; } -/* - * YFS.FetchStatus operation type - */ -static const struct afs_call_type yfs_RXYFSFetchStatus_vnode = { - .name = "YFS.FetchStatus(vnode)", - .op = yfs_FS_FetchStatus, - .deliver = yfs_deliver_fs_status_cb_and_volsync, - .destructor = afs_flat_call_destructor, -}; - -/* - * Fetch the status information for a file. 
- */ -void yfs_fs_fetch_file_status(struct afs_operation *op) -{ - struct afs_vnode_param *vp = &op->file[0]; - struct afs_call *call; - __be32 *bp; - - _enter(",%x,{%llx:%llu},,", - key_serial(op->key), vp->fid.vid, vp->fid.vnode); - - call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchStatus_vnode, - sizeof(__be32) * 2 + - sizeof(struct yfs_xdr_YFSFid), - sizeof(struct yfs_xdr_YFSFetchStatus) + - sizeof(struct yfs_xdr_YFSCallBack) + - sizeof(struct yfs_xdr_YFSVolSync)); - if (!call) - return afs_op_nomem(op); - - /* marshall the parameters */ - bp = call->request; - bp = xdr_encode_u32(bp, YFSFETCHSTATUS); - bp = xdr_encode_u32(bp, 0); /* RPC flags */ - bp = xdr_encode_YFSFid(bp, &vp->fid); - yfs_check_req(call, bp); - - trace_afs_make_fs_call(call, &vp->fid); - afs_make_op_call(op, call, GFP_NOFS); -} - /* * Deliver reply data to an YFS.FetchData64. */ @@ -1604,13 +1539,37 @@ void yfs_fs_release_lock(struct afs_operation *op) afs_make_op_call(op, call, GFP_NOFS); } +/* + * Deliver a reply to YFS.FetchStatus + */ +static int yfs_deliver_fs_fetch_status(struct afs_call *call) +{ + struct afs_operation *op = call->op; + struct afs_vnode_param *vp = &op->file[op->fetch_status.which]; + const __be32 *bp; + int ret; + + ret = afs_transfer_reply(call); + if (ret < 0) + return ret; + + /* unmarshall the reply once we've received all of it */ + bp = call->buffer; + xdr_decode_YFSFetchStatus(&bp, call, &vp->scb); + xdr_decode_YFSCallBack(&bp, call, &vp->scb); + xdr_decode_YFSVolSync(&bp, &op->volsync); + + _leave(" = 0 [done]"); + return 0; +} + /* * YFS.FetchStatus operation type */ static const struct afs_call_type yfs_RXYFSFetchStatus = { .name = "YFS.FetchStatus", .op = yfs_FS_FetchStatus, - .deliver = yfs_deliver_fs_status_cb_and_volsync, + .deliver = yfs_deliver_fs_fetch_status, .destructor = afs_flat_call_destructor, }; @@ -1619,7 +1578,7 @@ static const struct afs_call_type yfs_RXYFSFetchStatus = { */ void yfs_fs_fetch_status(struct afs_operation *op) { - struct afs_vnode_param *vp = &op->file[0]; + struct afs_vnode_param *vp = &op->file[op->fetch_status.which]; struct afs_call *call; __be32 *bp; diff --git a/fs/aio.c b/fs/aio.c index 7ecddc2f38db..91e7cc4a9f17 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -67,7 +67,7 @@ struct aio_ring { unsigned header_length; /* size of aio_ring */ - struct io_event io_events[0]; + struct io_event io_events[]; }; /* 128 bytes + ring size */ /* diff --git a/fs/block_dev.c b/fs/block_dev.c index 47860e589388..0ae656e022fd 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -75,7 +75,7 @@ static void bdev_write_inode(struct block_device *bdev) } /* Kill _all_ buffers and pagecache , dirty or not.. */ -void kill_bdev(struct block_device *bdev) +static void kill_bdev(struct block_device *bdev) { struct address_space *mapping = bdev->bd_inode->i_mapping; @@ -84,8 +84,7 @@ void kill_bdev(struct block_device *bdev) invalidate_bh_lrus(); truncate_inode_pages(mapping, 0); -} -EXPORT_SYMBOL(kill_bdev); +} /* Invalidate clean unused buffers and pagecache. 
*/ void invalidate_bdev(struct block_device *bdev) @@ -1565,10 +1564,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) */ if (!for_part) { ret = devcgroup_inode_permission(bdev->bd_inode, perm); - if (ret != 0) { - bdput(bdev); + if (ret != 0) return ret; - } } restart: @@ -1637,8 +1634,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) goto out_clear; BUG_ON(for_part); ret = __blkdev_get(whole, mode, 1); - if (ret) + if (ret) { + bdput(whole); goto out_clear; + } bdev->bd_contains = whole; bdev->bd_part = disk_get_part(disk, partno); if (!(disk->flags & GENHD_FL_UP) || @@ -1688,7 +1687,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) disk_unblock_events(disk); put_disk_and_module(disk); out: - bdput(bdev); return ret; } @@ -1755,6 +1753,9 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) bdput(whole); } + if (res) + bdput(bdev); + return res; } EXPORT_SYMBOL(blkdev_get); diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 176e8a292fd1..c037ef514b64 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -940,7 +940,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; - goto out_put_group; + goto out; } /* @@ -978,7 +978,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, ret = btrfs_orphan_add(trans, BTRFS_I(inode)); if (ret) { btrfs_add_delayed_iput(inode); - goto out_put_group; + goto out; } clear_nlink(inode); /* One for the block groups ref */ @@ -1001,13 +1001,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); if (ret < 0) - goto out_put_group; + goto out; if (ret > 0) btrfs_release_path(path); if (ret == 0) { ret = btrfs_del_item(trans, tree_root, path); if (ret) - goto out_put_group; + goto out; btrfs_release_path(path); } @@ -1016,6 +1016,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, &fs_info->block_group_cache_tree); RB_CLEAR_NODE(&block_group->cache_node); + /* Once for the block groups rbtree */ + btrfs_put_block_group(block_group); + if (fs_info->first_logical_byte == block_group->start) fs_info->first_logical_byte = (u64)-1; spin_unlock(&fs_info->block_group_cache_lock); @@ -1089,6 +1092,25 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, spin_unlock(&block_group->space_info->lock); + /* + * Remove the free space for the block group from the free space tree + * and the block group's item from the extent tree before marking the + * block group as removed. This is to prevent races with tasks that + * freeze and unfreeze a block group, this task and another task + * allocating a new block group - the unfreeze task ends up removing + * the block group's extent map before the task calling this function + * deletes the block group item from the extent tree, allowing for + * another task to attempt to create another block group with the same + * item key (and failing with -EEXIST and a transaction abort). 
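(Illustration, not part of the patch: the fs/block_dev.c changes above stop __blkdev_get() from dropping the caller's reference itself, so blkdev_get() releases it exactly once on failure. A generic sketch of that single-exit "take a reference, drop it in one place on error" idiom, with hypothetical names rather than the kernel API.)

#include <stdio.h>

struct dev { int refs; };

static void dev_get(struct dev *d) { d->refs++; }
static void dev_put(struct dev *d) { d->refs--; }

/* helper: may fail, but never drops the caller's reference */
static int setup(struct dev *d, int fail) { (void)d; return fail ? -1 : 0; }

static int open_dev(struct dev *d, int fail_step)
{
        int ret;

        dev_get(d);                     /* caller-visible reference */
        ret = setup(d, fail_step == 1);
        if (ret)
                goto out_put;
        ret = setup(d, fail_step == 2);
        if (ret)
                goto out_put;
        return 0;                       /* success: reference stays held */

out_put:
        dev_put(d);                     /* the one place that undoes dev_get() */
        return ret;
}

int main(void)
{
        struct dev d = { .refs = 0 };
        open_dev(&d, 2);
        printf("refs after failed open: %d (expected 0)\n", d.refs);
        return 0;
}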
+ */ + ret = remove_block_group_free_space(trans, block_group); + if (ret) + goto out; + + ret = remove_block_group_item(trans, path, block_group); + if (ret < 0) + goto out; + mutex_lock(&fs_info->chunk_mutex); spin_lock(&block_group->lock); block_group->removed = 1; @@ -1123,17 +1145,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, mutex_unlock(&fs_info->chunk_mutex); - ret = remove_block_group_free_space(trans, block_group); - if (ret) - goto out_put_group; - - /* Once for the block groups rbtree */ - btrfs_put_block_group(block_group); - - ret = remove_block_group_item(trans, path, block_group); - if (ret < 0) - goto out; - if (remove_em) { struct extent_map_tree *em_tree; @@ -1145,10 +1156,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, free_extent_map(em); } -out_put_group: +out: /* Once for the lookup reference */ btrfs_put_block_group(block_group); -out: if (remove_rsv) btrfs_delayed_refs_rsv_release(fs_info, 1); btrfs_free_path(path); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 30ce7039bc27..d404cce8ae40 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1009,6 +1009,8 @@ enum { BTRFS_ROOT_DEAD_RELOC_TREE, /* Mark dead root stored on device whose cleanup needs to be resumed */ BTRFS_ROOT_DEAD_TREE, + /* The root has a log tree. Used only for subvolume roots. */ + BTRFS_ROOT_HAS_LOG_TREE, }; /* diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 2c14312b05e8..2520605afc25 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1533,7 +1533,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages, } static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, - size_t *write_bytes) + size_t *write_bytes, bool nowait) { struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_root *root = inode->root; @@ -1541,27 +1541,43 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, u64 num_bytes; int ret; - if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) + if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock)) return -EAGAIN; lockstart = round_down(pos, fs_info->sectorsize); lockend = round_up(pos + *write_bytes, fs_info->sectorsize) - 1; - - btrfs_lock_and_flush_ordered_range(inode, lockstart, - lockend, NULL); - num_bytes = lockend - lockstart + 1; + + if (nowait) { + struct btrfs_ordered_extent *ordered; + + if (!try_lock_extent(&inode->io_tree, lockstart, lockend)) + return -EAGAIN; + + ordered = btrfs_lookup_ordered_range(inode, lockstart, + num_bytes); + if (ordered) { + btrfs_put_ordered_extent(ordered); + ret = -EAGAIN; + goto out_unlock; + } + } else { + btrfs_lock_and_flush_ordered_range(inode, lockstart, + lockend, NULL); + } + ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes, NULL, NULL, NULL); if (ret <= 0) { ret = 0; - btrfs_drew_write_unlock(&root->snapshot_lock); + if (!nowait) + btrfs_drew_write_unlock(&root->snapshot_lock); } else { *write_bytes = min_t(size_t, *write_bytes , num_bytes - pos + lockstart); } - +out_unlock: unlock_extent(&inode->io_tree, lockstart, lockend); return ret; @@ -1633,7 +1649,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) && check_can_nocow(BTRFS_I(inode), pos, - &write_bytes) > 0) { + &write_bytes, false) > 0) { /* * For nodata cow case, no need to reserve * data space. 
@@ -1904,13 +1920,25 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, pos = iocb->ki_pos; count = iov_iter_count(from); if (iocb->ki_flags & IOCB_NOWAIT) { + size_t nocow_bytes = count; + /* * We will allocate space in case nodatacow is not set, * so bail */ if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) || - check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) { + check_can_nocow(BTRFS_I(inode), pos, &nocow_bytes, + true) <= 0) { + inode_unlock(inode); + return -EAGAIN; + } + /* + * There are holes in the range or parts of the range that must + * be COWed (shared extents, RO block groups, etc), so just bail + * out. + */ + if (nocow_bytes < count) { inode_unlock(inode); return -EAGAIN; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d04c82c88418..18d384f4af54 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -985,6 +985,7 @@ static noinline int cow_file_range(struct inode *inode, u64 num_bytes; unsigned long ram_size; u64 cur_alloc_size = 0; + u64 min_alloc_size; u64 blocksize = fs_info->sectorsize; struct btrfs_key ins; struct extent_map *em; @@ -1035,10 +1036,26 @@ static noinline int cow_file_range(struct inode *inode, btrfs_drop_extent_cache(BTRFS_I(inode), start, start + num_bytes - 1, 0); + /* + * Relocation relies on the relocated extents to have exactly the same + * size as the original extents. Normally writeback for relocation data + * extents follows a NOCOW path because relocation preallocates the + * extents. However, due to an operation such as scrub turning a block + * group to RO mode, it may fallback to COW mode, so we must make sure + * an extent allocated during COW has exactly the requested size and can + * not be split into smaller extents, otherwise relocation breaks and + * fails during the stage where it updates the bytenr of file extent + * items. + */ + if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) + min_alloc_size = num_bytes; + else + min_alloc_size = fs_info->sectorsize; + while (num_bytes > 0) { cur_alloc_size = num_bytes; ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size, - fs_info->sectorsize, 0, alloc_hint, + min_alloc_size, 0, alloc_hint, &ins, 1, 1); if (ret < 0) goto out_unlock; @@ -1361,6 +1378,8 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page, int *page_started, unsigned long *nr_written) { const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode)); + const bool is_reloc_ino = (BTRFS_I(inode)->root->root_key.objectid == + BTRFS_DATA_RELOC_TREE_OBJECTID); const u64 range_bytes = end + 1 - start; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; u64 range_start = start; @@ -1391,18 +1410,23 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page, * data space info, which we incremented in the step above. * * If we need to fallback to cow and the inode corresponds to a free - * space cache inode, we must also increment bytes_may_use of the data - * space_info for the same reason. Space caches always get a prealloc + * space cache inode or an inode of the data relocation tree, we must + * also increment bytes_may_use of the data space_info for the same + * reason. Space caches and relocated data extents always get a prealloc * extent for them, however scrub or balance may have set the block - * group that contains that extent to RO mode. + * group that contains that extent to RO mode and therefore force COW + * when starting writeback. 
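(Illustration, not part of the patch: with the IOCB_NOWAIT handling added to fs/btrfs/file.c above, a non-blocking write now returns -EAGAIN whenever any part of the range would need COW. A user-space sketch of issuing and retrying such writes, assuming a glibc that exposes pwritev2() and RWF_NOWAIT.)

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        char buf[] = "hello";
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) - 1 };
        int fd = open("testfile", O_WRONLY | O_CREAT, 0644);

        if (fd < 0)
                return 1;

        /* Try a non-blocking write first. */
        ssize_t n = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
        if (n < 0 && errno == EAGAIN) {
                /* Range needs COW/allocation: fall back to a blocking write. */
                n = pwritev2(fd, &iov, 1, 0, 0);
        }
        printf("wrote %zd bytes\n", n);
        close(fd);
        return n < 0;
}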
*/ count = count_range_bits(io_tree, &range_start, end, range_bytes, EXTENT_NORESERVE, 0); - if (count > 0 || is_space_ino) { - const u64 bytes = is_space_ino ? range_bytes : count; + if (count > 0 || is_space_ino || is_reloc_ino) { + u64 bytes = count; struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; struct btrfs_space_info *sinfo = fs_info->data_sinfo; + if (is_space_ino || is_reloc_ino) + bytes = range_bytes; + spin_lock(&sinfo->lock); btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes); spin_unlock(&sinfo->lock); @@ -7865,9 +7889,6 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) dio_data.overwrite = 1; inode_unlock(inode); relock = true; - } else if (iocb->ki_flags & IOCB_NOWAIT) { - ret = -EAGAIN; - goto out; } ret = btrfs_delalloc_reserve_space(inode, &data_reserved, offset, count); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 168deb8ef68a..e8f7c5f00894 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2692,7 +2692,7 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp) btrfs_put_root(root); out_free: btrfs_free_path(path); - kzfree(subvol_info); + kfree(subvol_info); return ret; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 920cee312f4e..cd5348f352dd 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -169,6 +169,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans, if (ret) goto out; + set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state); clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state); root->log_start_pid = current->pid; } @@ -195,6 +196,9 @@ static int join_running_log_trans(struct btrfs_root *root) { int ret = -ENOENT; + if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state)) + return ret; + mutex_lock(&root->log_mutex); if (root->log_root) { ret = 0; @@ -3303,6 +3307,7 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) if (root->log_root) { free_log_tree(trans, root->log_root); root->log_root = NULL; + clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state); } return 0; } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 4fe757cfc360..9b0f8f33f832 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4336,7 +4336,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, break; __SetPageLocked(page); - if (add_to_page_cache_locked(page, mapping, page->index, gfp)) { + rc = add_to_page_cache_locked(page, mapping, page->index, gfp); + if (rc) { __ClearPageLocked(page); break; } @@ -4352,6 +4353,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, struct list_head *page_list, unsigned num_pages) { int rc; + int err = 0; struct list_head tmplist; struct cifsFileInfo *open_file = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); @@ -4396,7 +4398,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, * the order of declining indexes. When we put the pages in * the rdata->pages, then we want them in increasing order. 
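(Illustration, not part of the patch: the fs/btrfs/tree-log.c change above lets join_running_log_trans() return before taking log_mutex when the subvolume never created a log tree. A much-simplified user-space model of that flag-gated fast path, using C11 atomics in place of the kernel's set_bit()/test_bit().)

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool has_log_tree;                /* mirrors the root state bit */
static pthread_mutex_t log_mutex = PTHREAD_MUTEX_INITIALIZER;

static int join_running_log(void)
{
        /* fast path: no log tree was ever created, skip the lock entirely */
        if (!atomic_load(&has_log_tree))
                return -1;                      /* -ENOENT in the kernel */

        pthread_mutex_lock(&log_mutex);
        /* ... recheck and join the running log transaction here ... */
        pthread_mutex_unlock(&log_mutex);
        return 0;
}

int main(void)
{
        printf("without log tree: %d\n", join_running_log());
        atomic_store(&has_log_tree, true);      /* start_log_trans() sets the bit */
        printf("with log tree:    %d\n", join_running_log());
        return 0;
}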
*/ - while (!list_empty(page_list)) { + while (!list_empty(page_list) && !err) { unsigned int i, nr_pages, bytes, rsize; loff_t offset; struct page *page, *tpage; @@ -4429,9 +4431,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, return 0; } - rc = readpages_get_pages(mapping, page_list, rsize, &tmplist, + nr_pages = 0; + err = readpages_get_pages(mapping, page_list, rsize, &tmplist, &nr_pages, &offset, &bytes); - if (rc) { + if (!nr_pages) { add_credits_and_wake_if(server, credits, 0); break; } diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 583f5e4008c2..ce95801e9b66 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -2535,6 +2535,15 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, if (rc == 0) { cifsInode->server_eof = attrs->ia_size; cifs_setsize(inode, attrs->ia_size); + + /* + * The man page of truncate says if the size changed, + * then the st_ctime and st_mtime fields for the file + * are updated. + */ + attrs->ia_ctime = attrs->ia_mtime = current_time(inode); + attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME; + cifs_truncate_page(inode->i_mapping, inode->i_size); } diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 56791a692c8b..e44d049142d0 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -844,28 +844,26 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw) struct bio_vec *bv = NULL; if (iov_iter_is_kvec(iter)) { - memcpy(&ctx->iter, iter, sizeof(struct iov_iter)); + memcpy(&ctx->iter, iter, sizeof(*iter)); ctx->len = count; iov_iter_advance(iter, count); return 0; } - if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT) - bv = kmalloc_array(max_pages, sizeof(struct bio_vec), - GFP_KERNEL); + if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT) + bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL); if (!bv) { - bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec))); + bv = vmalloc(array_size(max_pages, sizeof(*bv))); if (!bv) return -ENOMEM; } - if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT) - pages = kmalloc_array(max_pages, sizeof(struct page *), - GFP_KERNEL); + if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT) + pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL); if (!pages) { - pages = vmalloc(array_size(max_pages, sizeof(struct page *))); + pages = vmalloc(array_size(max_pages, sizeof(*pages))); if (!pages) { kvfree(bv); return -ENOMEM; diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 736d86b8a910..d9fdafa5eb60 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -763,6 +763,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, /* close extra handle outside of crit sec */ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); } + rc = 0; goto oshr_free; } @@ -3187,6 +3188,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid, ses->Suid, offset, len); + /* + * We zero the range through ioctl, so we need remove the page caches + * first, otherwise the data may be inconsistent with the server. 
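(Illustration, not part of the patch: the fs/cifs/inode.c hunk above makes a server-side size change follow truncate(2) semantics and refresh st_ctime/st_mtime. A quick user-space check of that behaviour; it assumes ./testfile already exists and is non-empty.)

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        struct stat before, after;

        if (stat("testfile", &before) != 0)
                return 1;
        sleep(1);                               /* make the timestamps move */
        if (truncate("testfile", 0) != 0)       /* change the size */
                return 1;
        if (stat("testfile", &after) != 0)
                return 1;

        printf("mtime changed: %d, ctime changed: %d\n",
               after.st_mtime != before.st_mtime,
               after.st_ctime != before.st_ctime);
        return 0;
}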
+ */ + truncate_pagecache_range(inode, offset, offset + len - 1); /* if file not oplocked can't be sure whether asking to extend size */ if (!CIFS_CACHE_READ(cifsi)) @@ -3253,6 +3259,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, return rc; } + /* + * We implement the punch hole through ioctl, so we need remove the page + * caches first, otherwise the data may be inconsistent with the server. + */ + truncate_pagecache_range(inode, offset, offset + len - 1); + cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len); fsctl_buf.FileOffset = cpu_to_le64(offset); diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c index e9e27a271af0..feaa5e182b7b 100644 --- a/fs/efivarfs/file.c +++ b/fs/efivarfs/file.c @@ -51,6 +51,7 @@ static ssize_t efivarfs_file_write(struct file *file, } else { inode_lock(inode); i_size_write(inode, datasize + sizeof(attributes)); + inode->i_mtime = current_time(inode); inode_unlock(inode); } @@ -72,10 +73,8 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf, ssize_t size = 0; int err; - while (!__ratelimit(&file->f_cred->user->ratelimit)) { - if (!msleep_interruptible(50)) - return -EINTR; - } + while (!__ratelimit(&file->f_cred->user->ratelimit)) + msleep(50); err = efivar_entry_size(var, &datasize); diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h index 7824f5563a55..9b66c28b3ae9 100644 --- a/fs/erofs/zdata.h +++ b/fs/erofs/zdata.h @@ -144,22 +144,22 @@ static inline void z_erofs_onlinepage_init(struct page *page) static inline void z_erofs_onlinepage_fixup(struct page *page, uintptr_t index, bool down) { - unsigned long *p, o, v, id; -repeat: - p = &page_private(page); - o = READ_ONCE(*p); + union z_erofs_onlinepage_converter u = { .v = &page_private(page) }; + int orig, orig_index, val; - id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; - if (id) { +repeat: + orig = atomic_read(u.o); + orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; + if (orig_index) { if (!index) return; - DBG_BUGON(id != index); + DBG_BUGON(orig_index != index); } - v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | - ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down); - if (cmpxchg(p, o, v) != o) + val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | + ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down); + if (atomic_cmpxchg(u.o, orig, val) != orig) goto repeat; } diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile index 4ccb3c9189d8..2e42f47a7f98 100644 --- a/fs/ext4/Makefile +++ b/fs/ext4/Makefile @@ -9,7 +9,8 @@ ext4-y := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \ extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \ indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \ mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \ - super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o + super.o symlink.o sysfs.o xattr.o xattr_hurd.o xattr_trusted.o \ + xattr_user.o ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index c654205f648d..1d82336b1cd4 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -675,6 +675,7 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len, struct qstr qstr = {.name = str, .len = len }; const struct dentry *parent = READ_ONCE(dentry->d_parent); const struct inode *inode = READ_ONCE(parent->d_inode); + char strbuf[DNAME_INLINE_LEN]; if (!inode || !IS_CASEFOLDED(inode) || !EXT4_SB(inode->i_sb)->s_encoding) { @@ -683,6 +684,21 @@ static int ext4_d_compare(const struct 
dentry *dentry, unsigned int len, return memcmp(str, name->name, len); } + /* + * If the dentry name is stored in-line, then it may be concurrently + * modified by a rename. If this happens, the VFS will eventually retry + * the lookup, so it doesn't matter what ->d_compare() returns. + * However, it's unsafe to call utf8_strncasecmp() with an unstable + * string. Therefore, we have to copy the name into a temporary buffer. + */ + if (len <= DNAME_INLINE_LEN - 1) { + memcpy(strbuf, str, len); + strbuf[len] = 0; + qstr.name = strbuf; + /* prevent compiler from optimizing out the temporary buffer */ + barrier(); + } + return ext4_ci_compare(inode, name, &qstr, false); } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index b08841f70b69..42f5060f3cdf 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -426,13 +426,16 @@ struct flex_groups { #define EXT4_VERITY_FL 0x00100000 /* Verity protected inode */ #define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */ /* 0x00400000 was formerly EXT4_EOFBLOCKS_FL */ + +#define EXT4_DAX_FL 0x02000000 /* Inode is DAX */ + #define EXT4_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */ #define EXT4_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ #define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded directory */ #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ -#define EXT4_FL_USER_VISIBLE 0x705BDFFF /* User visible flags */ -#define EXT4_FL_USER_MODIFIABLE 0x604BC0FF /* User modifiable flags */ +#define EXT4_FL_USER_VISIBLE 0x725BDFFF /* User visible flags */ +#define EXT4_FL_USER_MODIFIABLE 0x624BC0FF /* User modifiable flags */ /* Flags we can manipulate with through EXT4_IOC_FSSETXATTR */ #define EXT4_FL_XFLAG_VISIBLE (EXT4_SYNC_FL | \ @@ -440,14 +443,16 @@ struct flex_groups { EXT4_APPEND_FL | \ EXT4_NODUMP_FL | \ EXT4_NOATIME_FL | \ - EXT4_PROJINHERIT_FL) + EXT4_PROJINHERIT_FL | \ + EXT4_DAX_FL) /* Flags that should be inherited by new inodes from their parent. */ #define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\ EXT4_SYNC_FL | EXT4_NODUMP_FL | EXT4_NOATIME_FL |\ EXT4_NOCOMPR_FL | EXT4_JOURNAL_DATA_FL |\ EXT4_NOTAIL_FL | EXT4_DIRSYNC_FL |\ - EXT4_PROJINHERIT_FL | EXT4_CASEFOLD_FL) + EXT4_PROJINHERIT_FL | EXT4_CASEFOLD_FL |\ + EXT4_DAX_FL) /* Flags that are appropriate for regular files (all but dir-specific ones). */ #define EXT4_REG_FLMASK (~(EXT4_DIRSYNC_FL | EXT4_TOPDIR_FL | EXT4_CASEFOLD_FL |\ @@ -459,6 +464,10 @@ struct flex_groups { /* The only flags that should be swapped */ #define EXT4_FL_SHOULD_SWAP (EXT4_HUGE_FILE_FL | EXT4_EXTENTS_FL) +/* Flags which are mutually exclusive to DAX */ +#define EXT4_DAX_MUT_EXCL (EXT4_VERITY_FL | EXT4_ENCRYPT_FL |\ + EXT4_JOURNAL_DATA_FL) + /* Mask out flags that are inappropriate for the given type of inode. */ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags) { @@ -499,6 +508,7 @@ enum { EXT4_INODE_VERITY = 20, /* Verity protected inode */ EXT4_INODE_EA_INODE = 21, /* Inode used for large EA */ /* 22 was formerly EXT4_INODE_EOFBLOCKS */ + EXT4_INODE_DAX = 25, /* Inode is DAX */ EXT4_INODE_INLINE_DATA = 28, /* Data in inode. 
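(Illustration, not part of the patch: the ext4_d_compare() change above snapshots a possibly unstable in-line name into a local buffer before running the case-insensitive comparison. The same defensive pattern in ordinary user-space C, with NAME_BOUND standing in for DNAME_INLINE_LEN.)

#include <stdio.h>
#include <string.h>
#include <strings.h>

#define NAME_BOUND 40                   /* plays the role of DNAME_INLINE_LEN */

/* Compare 'len' bytes of a possibly unstable buffer against 'name',
 * case-insensitively, without letting the comparator ever see bytes
 * that might change under it. */
static int compare_snapshot(const char *unstable, size_t len, const char *name)
{
        char strbuf[NAME_BOUND];

        if (len > NAME_BOUND - 1)
                return 1;               /* treat over-long names as a mismatch */

        memcpy(strbuf, unstable, len);  /* take a private snapshot */
        strbuf[len] = '\0';
        return strcasecmp(strbuf, name);
}

int main(void)
{
        const char shared[] = "Makefile"; /* imagine another thread renaming this */
        printf("match: %d\n", compare_snapshot(shared, strlen(shared), "makefile") == 0);
        return 0;
}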
*/ EXT4_INODE_PROJINHERIT = 29, /* Create with parents projid */ EXT4_INODE_CASEFOLD = 30, /* Casefolded directory */ @@ -1135,9 +1145,9 @@ struct ext4_inode_info { #define EXT4_MOUNT_MINIX_DF 0x00080 /* Mimics the Minix statfs */ #define EXT4_MOUNT_NOLOAD 0x00100 /* Don't use existing journal*/ #ifdef CONFIG_FS_DAX -#define EXT4_MOUNT_DAX 0x00200 /* Direct Access */ +#define EXT4_MOUNT_DAX_ALWAYS 0x00200 /* Direct Access */ #else -#define EXT4_MOUNT_DAX 0 +#define EXT4_MOUNT_DAX_ALWAYS 0 #endif #define EXT4_MOUNT_DATA_FLAGS 0x00C00 /* Mode for data writes: */ #define EXT4_MOUNT_JOURNAL_DATA 0x00400 /* Write data to journal */ @@ -1180,6 +1190,8 @@ struct ext4_inode_info { blocks */ #define EXT4_MOUNT2_HURD_COMPAT 0x00000004 /* Support HURD-castrated file systems */ +#define EXT4_MOUNT2_DAX_NEVER 0x00000008 /* Do not allow Direct Access */ +#define EXT4_MOUNT2_DAX_INODE 0x00000010 /* For printing options only */ #define EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM 0x00000008 /* User explicitly specified journal checksum */ @@ -1992,6 +2004,7 @@ static inline bool ext4_has_incompat_features(struct super_block *sb) */ #define EXT4_FLAGS_RESIZING 0 #define EXT4_FLAGS_SHUTDOWN 1 +#define EXT4_FLAGS_BDEV_IS_DAX 2 static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi) { @@ -2705,7 +2718,7 @@ extern int ext4_can_truncate(struct inode *inode); extern int ext4_truncate(struct inode *); extern int ext4_break_layouts(struct inode *); extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length); -extern void ext4_set_inode_flags(struct inode *); +extern void ext4_set_inode_flags(struct inode *, bool init); extern int ext4_alloc_da_blocks(struct inode *inode); extern void ext4_set_aops(struct inode *inode); extern int ext4_writepage_trans_blocks(struct inode *); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 7d088ff1e902..221f240eae60 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -2844,7 +2844,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, * in use to avoid freeing it when removing blocks. 
*/ if (sbi->s_cluster_ratio > 1) { - pblk = ext4_ext_pblock(ex) + end - ee_block + 2; + pblk = ext4_ext_pblock(ex) + end - ee_block + 1; partial.pclu = EXT4_B2C(sbi, pblk); partial.state = nofree; } diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 54d324e80fe5..df25d38d6539 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -1116,7 +1116,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, ei->i_block_group = group; ei->i_last_alloc_group = ~0; - ext4_set_inode_flags(inode); + ext4_set_inode_flags(inode, true); if (IS_DIRSYNC(inode)) ext4_handle_sync(handle); if (insert_inode_locked(inode) < 0) { diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 40ec5c7ef0d3..10dd470876b3 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4403,9 +4403,11 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); } -static bool ext4_should_use_dax(struct inode *inode) +static bool ext4_should_enable_dax(struct inode *inode) { - if (!test_opt(inode->i_sb, DAX)) + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + + if (test_opt2(inode->i_sb, DAX_NEVER)) return false; if (!S_ISREG(inode->i_mode)) return false; @@ -4417,14 +4419,21 @@ static bool ext4_should_use_dax(struct inode *inode) return false; if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY)) return false; - return true; + if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) + return false; + if (test_opt(inode->i_sb, DAX_ALWAYS)) + return true; + + return ext4_test_inode_flag(inode, EXT4_INODE_DAX); } -void ext4_set_inode_flags(struct inode *inode) +void ext4_set_inode_flags(struct inode *inode, bool init) { unsigned int flags = EXT4_I(inode)->i_flags; unsigned int new_fl = 0; + WARN_ON_ONCE(IS_DAX(inode) && init); + if (flags & EXT4_SYNC_FL) new_fl |= S_SYNC; if (flags & EXT4_APPEND_FL) @@ -4435,8 +4444,13 @@ void ext4_set_inode_flags(struct inode *inode) new_fl |= S_NOATIME; if (flags & EXT4_DIRSYNC_FL) new_fl |= S_DIRSYNC; - if (ext4_should_use_dax(inode)) + + /* Because of the way inode_set_flags() works we must preserve S_DAX + * here if already set. */ + new_fl |= (inode->i_flags & S_DAX); + if (init && ext4_should_enable_dax(inode)) new_fl |= S_DAX; + if (flags & EXT4_ENCRYPT_FL) new_fl |= S_ENCRYPTED; if (flags & EXT4_CASEFOLD_FL) @@ -4650,7 +4664,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, * not initialized on a new filesystem. 
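(Illustration, not part of the patch: the ext4_ext_remove_space() fix above is a plain off-by-one. If the extent's first file block ee_block maps to physical block p0, then file block X maps to p0 + (X - ee_block), so the first physical block past 'end' is p0 + (end - ee_block) + 1, not + 2. A trivial numeric check.)

#include <stdio.h>

int main(void)
{
        unsigned long long p0 = 1000;   /* physical block of the extent start */
        unsigned long long ee_block = 50; /* first file block covered */
        unsigned long long end = 57;    /* last file block we keep */

        unsigned long long pblk_end = p0 + (end - ee_block);        /* maps 'end' */
        unsigned long long pblk_after = p0 + (end - ee_block) + 1;  /* first block past 'end' */

        printf("end -> %llu, first block past end -> %llu\n", pblk_end, pblk_after);
        return 0;
}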
*/ } ei->i_flags = le32_to_cpu(raw_inode->i_flags); - ext4_set_inode_flags(inode); + ext4_set_inode_flags(inode, true); inode->i_blocks = ext4_inode_blocks(raw_inode, ei); ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); if (ext4_has_feature_64bit(sb)) diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 2162db0c747d..999cf6add39c 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -292,6 +292,38 @@ static int ext4_ioctl_check_immutable(struct inode *inode, __u32 new_projid, return 0; } +static void ext4_dax_dontcache(struct inode *inode, unsigned int flags) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + + if (S_ISDIR(inode->i_mode)) + return; + + if (test_opt2(inode->i_sb, DAX_NEVER) || + test_opt(inode->i_sb, DAX_ALWAYS)) + return; + + if ((ei->i_flags ^ flags) & EXT4_DAX_FL) + d_mark_dontcache(inode); +} + +static bool dax_compatible(struct inode *inode, unsigned int oldflags, + unsigned int flags) +{ + if (flags & EXT4_DAX_FL) { + if ((oldflags & EXT4_DAX_MUT_EXCL) || + ext4_test_inode_state(inode, + EXT4_STATE_VERITY_IN_PROGRESS)) { + return false; + } + } + + if ((flags & EXT4_DAX_MUT_EXCL) && (oldflags & EXT4_DAX_FL)) + return false; + + return true; +} + static int ext4_ioctl_setflags(struct inode *inode, unsigned int flags) { @@ -300,7 +332,6 @@ static int ext4_ioctl_setflags(struct inode *inode, int err = -EPERM, migrate = 0; struct ext4_iloc iloc; unsigned int oldflags, mask, i; - unsigned int jflag; struct super_block *sb = inode->i_sb; /* Is it quota file? Do not allow user to mess with it */ @@ -309,9 +340,6 @@ static int ext4_ioctl_setflags(struct inode *inode, oldflags = ei->i_flags; - /* The JOURNAL_DATA flag is modifiable only by root */ - jflag = flags & EXT4_JOURNAL_DATA_FL; - err = vfs_ioc_setflags_prepare(inode, oldflags, flags); if (err) goto flags_out; @@ -320,10 +348,16 @@ static int ext4_ioctl_setflags(struct inode *inode, * The JOURNAL_DATA flag can only be changed by * the relevant capability. */ - if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) { + if ((flags ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) { if (!capable(CAP_SYS_RESOURCE)) goto flags_out; } + + if (!dax_compatible(inode, oldflags, flags)) { + err = -EOPNOTSUPP; + goto flags_out; + } + if ((flags ^ oldflags) & EXT4_EXTENTS_FL) migrate = 1; @@ -369,6 +403,8 @@ static int ext4_ioctl_setflags(struct inode *inode, if (err) goto flags_err; + ext4_dax_dontcache(inode, flags); + for (i = 0, mask = 1; i < 32; i++, mask <<= 1) { if (!(mask & EXT4_FL_USER_MODIFIABLE)) continue; @@ -381,7 +417,8 @@ static int ext4_ioctl_setflags(struct inode *inode, ext4_clear_inode_flag(inode, i); } - ext4_set_inode_flags(inode); + ext4_set_inode_flags(inode, false); + inode->i_ctime = current_time(inode); err = ext4_mark_iloc_dirty(handle, inode, &iloc); @@ -390,17 +427,18 @@ static int ext4_ioctl_setflags(struct inode *inode, if (err) goto flags_out; - if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) { + if ((flags ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) { /* * Changes to the journaling mode can cause unsafe changes to - * S_DAX if we are using the DAX mount option. 
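(Illustration, not part of the patch: the per-inode DAX flag wired up here is driven from user space through the generic FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR interface via FS_XFLAG_DAX, whose mapping to EXT4_DAX_FL appears just below, typically on a filesystem mounted with the new dax=inode option. A sketch; the path is hypothetical and the ioctl may fail, e.g. with EOPNOTSUPP, when the flag is incompatible or the device is not DAX-capable.)

#include <fcntl.h>
#include <linux/fs.h>           /* struct fsxattr, FS_IOC_*XATTR, FS_XFLAG_DAX */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/ext4/datafile", O_RDWR | O_CREAT, 0644);
        if (fd < 0)
                return 1;

        struct fsxattr fsx;
        if (ioctl(fd, FS_IOC_FSGETXATTR, &fsx) == 0) {
                fsx.fsx_xflags |= FS_XFLAG_DAX;         /* request per-inode DAX */
                if (ioctl(fd, FS_IOC_FSSETXATTR, &fsx) != 0)
                        perror("FS_IOC_FSSETXATTR");
        }
        close(fd);
        return 0;
}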
+ * S_DAX if the inode is DAX */ - if (test_opt(inode->i_sb, DAX)) { + if (IS_DAX(inode)) { err = -EBUSY; goto flags_out; } - err = ext4_change_inode_journal_flag(inode, jflag); + err = ext4_change_inode_journal_flag(inode, + flags & EXT4_JOURNAL_DATA_FL); if (err) goto flags_out; } @@ -527,12 +565,15 @@ static inline __u32 ext4_iflags_to_xflags(unsigned long iflags) xflags |= FS_XFLAG_NOATIME; if (iflags & EXT4_PROJINHERIT_FL) xflags |= FS_XFLAG_PROJINHERIT; + if (iflags & EXT4_DAX_FL) + xflags |= FS_XFLAG_DAX; return xflags; } #define EXT4_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \ FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \ - FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT) + FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT | \ + FS_XFLAG_DAX) /* Transfer xflags flags to internal */ static inline unsigned long ext4_xflags_to_iflags(__u32 xflags) @@ -551,6 +592,8 @@ static inline unsigned long ext4_xflags_to_iflags(__u32 xflags) iflags |= EXT4_NOATIME_FL; if (xflags & FS_XFLAG_PROJINHERIT) iflags |= EXT4_PROJINHERIT_FL; + if (xflags & FS_XFLAG_DAX) + iflags |= EXT4_DAX_FL; return iflags; } diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index a9083113a8c0..c0a331e2feb0 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4708,7 +4708,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, } ac->ac_op = EXT4_MB_HISTORY_PREALLOC; - seq = *this_cpu_ptr(&discard_pa_seq); + seq = this_cpu_read(discard_pa_seq); if (!ext4_mb_use_preallocated(ac)) { ac->ac_op = EXT4_MB_HISTORY_ALLOC; ext4_mb_normalize_request(ac, ar); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index c668f6b42374..330957ed1f05 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -522,9 +522,6 @@ static void ext4_handle_error(struct super_block *sb) smp_wmb(); sb->s_flags |= SB_RDONLY; } else if (test_opt(sb, ERRORS_PANIC)) { - if (EXT4_SB(sb)->s_journal && - !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR)) - return; panic("EXT4-fs (device %s): panic forced after error\n", sb->s_id); } @@ -725,23 +722,20 @@ void __ext4_abort(struct super_block *sb, const char *function, va_end(args); if (sb_rdonly(sb) == 0) { - ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED; + if (EXT4_SB(sb)->s_journal) + jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO); + + ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); /* * Make sure updated value of ->s_mount_flags will be visible * before ->s_flags update */ smp_wmb(); sb->s_flags |= SB_RDONLY; - if (EXT4_SB(sb)->s_journal) - jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO); } - if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) { - if (EXT4_SB(sb)->s_journal && - !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR)) - return; + if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) panic("EXT4-fs panic from previous error\n"); - } } void __ext4_msg(struct super_block *sb, @@ -1324,6 +1318,9 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len, if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode))) return -EINVAL; + if (ext4_test_inode_flag(inode, EXT4_INODE_DAX)) + return -EOPNOTSUPP; + res = ext4_convert_inline_data(inode); if (res) return res; @@ -1349,7 +1346,7 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len, * Update inode->i_flags - S_ENCRYPTED will be enabled, * S_DAX may be disabled */ - ext4_set_inode_flags(inode); + ext4_set_inode_flags(inode, false); } return res; } @@ -1376,7 +1373,7 @@ static int ext4_set_context(struct inode *inode, const void *ctx, 
size_t len, * Update inode->i_flags - S_ENCRYPTED will be enabled, * S_DAX may be disabled */ - ext4_set_inode_flags(inode); + ext4_set_inode_flags(inode, false); res = ext4_mark_inode_dirty(handle, inode); if (res) EXT4_ERROR_INODE(inode, "Failed to mark inode dirty"); @@ -1514,7 +1511,8 @@ enum { Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, - Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax, + Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, + Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never, Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error, Opt_nowarn_on_error, Opt_mblk_io_submit, Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize, @@ -1581,6 +1579,9 @@ static const match_table_t tokens = { {Opt_nobarrier, "nobarrier"}, {Opt_i_version, "i_version"}, {Opt_dax, "dax"}, + {Opt_dax_always, "dax=always"}, + {Opt_dax_inode, "dax=inode"}, + {Opt_dax_never, "dax=never"}, {Opt_stripe, "stripe=%u"}, {Opt_delalloc, "delalloc"}, {Opt_warn_on_error, "warn_on_error"}, @@ -1729,6 +1730,7 @@ static int clear_qf_name(struct super_block *sb, int qtype) #define MOPT_NO_EXT3 0x0200 #define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3) #define MOPT_STRING 0x0400 +#define MOPT_SKIP 0x0800 static const struct mount_opts { int token; @@ -1778,7 +1780,13 @@ static const struct mount_opts { {Opt_min_batch_time, 0, MOPT_GTE0}, {Opt_inode_readahead_blks, 0, MOPT_GTE0}, {Opt_init_itable, 0, MOPT_GTE0}, - {Opt_dax, EXT4_MOUNT_DAX, MOPT_SET}, + {Opt_dax, EXT4_MOUNT_DAX_ALWAYS, MOPT_SET | MOPT_SKIP}, + {Opt_dax_always, EXT4_MOUNT_DAX_ALWAYS, + MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP}, + {Opt_dax_inode, EXT4_MOUNT2_DAX_INODE, + MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP}, + {Opt_dax_never, EXT4_MOUNT2_DAX_NEVER, + MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP}, {Opt_stripe, 0, MOPT_GTE0}, {Opt_resuid, 0, MOPT_GTE0}, {Opt_resgid, 0, MOPT_GTE0}, @@ -2123,13 +2131,56 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, } sbi->s_jquota_fmt = m->mount_opt; #endif - } else if (token == Opt_dax) { + } else if (token == Opt_dax || token == Opt_dax_always || + token == Opt_dax_inode || token == Opt_dax_never) { #ifdef CONFIG_FS_DAX - ext4_msg(sb, KERN_WARNING, - "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); - sbi->s_mount_opt |= m->mount_opt; + switch (token) { + case Opt_dax: + case Opt_dax_always: + if (is_remount && + (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) || + (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) { + fail_dax_change_remount: + ext4_msg(sb, KERN_ERR, "can't change " + "dax mount option while remounting"); + return -1; + } + if (is_remount && + (test_opt(sb, DATA_FLAGS) == + EXT4_MOUNT_JOURNAL_DATA)) { + ext4_msg(sb, KERN_ERR, "can't mount with " + "both data=journal and dax"); + return -1; + } + ext4_msg(sb, KERN_WARNING, + "DAX enabled. 
Warning: EXPERIMENTAL, use at your own risk"); + sbi->s_mount_opt |= EXT4_MOUNT_DAX_ALWAYS; + sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER; + break; + case Opt_dax_never: + if (is_remount && + (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) || + (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS))) + goto fail_dax_change_remount; + sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER; + sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS; + break; + case Opt_dax_inode: + if (is_remount && + ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) || + (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) || + !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE))) + goto fail_dax_change_remount; + sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS; + sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER; + /* Strictly for printing options */ + sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_INODE; + break; + } #else ext4_msg(sb, KERN_INFO, "dax option not supported"); + sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER; + sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS; return -1; #endif } else if (token == Opt_data_err_abort) { @@ -2293,7 +2344,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, for (m = ext4_mount_opts; m->token != Opt_err; m++) { int want_set = m->flags & MOPT_SET; if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) || - (m->flags & MOPT_CLEAR_ERR)) + (m->flags & MOPT_CLEAR_ERR) || m->flags & MOPT_SKIP) continue; if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt))) continue; /* skip if same as the default */ @@ -2353,6 +2404,17 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, fscrypt_show_test_dummy_encryption(seq, sep, sb); + if (test_opt(sb, DAX_ALWAYS)) { + if (IS_EXT2_SB(sb)) + SEQ_OPTS_PUTS("dax"); + else + SEQ_OPTS_PUTS("dax=always"); + } else if (test_opt2(sb, DAX_NEVER)) { + SEQ_OPTS_PUTS("dax=never"); + } else if (test_opt2(sb, DAX_INODE)) { + SEQ_OPTS_PUTS("dax=inode"); + } + ext4_show_quota_options(seq, sb); return 0; } @@ -2383,6 +2445,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, ext4_msg(sb, KERN_ERR, "revision level too high, " "forcing read-only mode"); err = -EROFS; + goto done; } if (read_only) goto done; @@ -4017,7 +4080,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) "both data=journal and delalloc"); goto failed_mount; } - if (test_opt(sb, DAX)) { + if (test_opt(sb, DAX_ALWAYS)) { ext4_msg(sb, KERN_ERR, "can't mount with " "both data=journal and dax"); goto failed_mount; @@ -4127,13 +4190,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount; } - if (sbi->s_mount_opt & EXT4_MOUNT_DAX) { + if (bdev_dax_supported(sb->s_bdev, blocksize)) + set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags); + + if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) { if (ext4_has_feature_inline_data(sb)) { ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem" " that may contain inline data"); goto failed_mount; } - if (!bdev_dax_supported(sb->s_bdev, blocksize)) { + if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) { ext4_msg(sb, KERN_ERR, "DAX unsupported by block device."); goto failed_mount; @@ -5447,12 +5513,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) err = -EINVAL; goto restore_opts; } - if (test_opt(sb, DAX)) { - ext4_msg(sb, KERN_ERR, "can't mount with " - "both data=journal and dax"); - err = -EINVAL; - goto restore_opts; - } } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) { if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { ext4_msg(sb, 
KERN_ERR, "can't mount with " @@ -5468,12 +5528,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } - if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) { - ext4_msg(sb, KERN_WARNING, "warning: refusing change of " - "dax flag with busy inodes while remounting"); - sbi->s_mount_opt ^= EXT4_MOUNT_DAX; - } - if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) ext4_abort(sb, EXT4_ERR_ESHUTDOWN, "Abort forced by user"); diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c index dec1244dd062..bbd5e7e0632b 100644 --- a/fs/ext4/verity.c +++ b/fs/ext4/verity.c @@ -113,6 +113,9 @@ static int ext4_begin_enable_verity(struct file *filp) handle_t *handle; int err; + if (IS_DAX(inode) || ext4_test_inode_flag(inode, EXT4_INODE_DAX)) + return -EINVAL; + if (ext4_verity_in_progress(inode)) return -EBUSY; @@ -241,7 +244,7 @@ static int ext4_end_enable_verity(struct file *filp, const void *desc, if (err) goto out_stop; ext4_set_inode_flag(inode, EXT4_INODE_VERITY); - ext4_set_inode_flags(inode); + ext4_set_inode_flags(inode, false); err = ext4_mark_iloc_dirty(handle, inode, &iloc); } out_stop: diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 9b29a40738ac..7d2f6576d954 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -93,6 +93,7 @@ static const struct xattr_handler * const ext4_xattr_handler_map[] = { #ifdef CONFIG_EXT4_FS_SECURITY [EXT4_XATTR_INDEX_SECURITY] = &ext4_xattr_security_handler, #endif + [EXT4_XATTR_INDEX_HURD] = &ext4_xattr_hurd_handler, }; const struct xattr_handler *ext4_xattr_handlers[] = { @@ -105,6 +106,7 @@ const struct xattr_handler *ext4_xattr_handlers[] = { #ifdef CONFIG_EXT4_FS_SECURITY &ext4_xattr_security_handler, #endif + &ext4_xattr_hurd_handler, NULL }; diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h index ffe21ac77f78..730b91fa0dd7 100644 --- a/fs/ext4/xattr.h +++ b/fs/ext4/xattr.h @@ -124,6 +124,7 @@ struct ext4_xattr_inode_array { extern const struct xattr_handler ext4_xattr_user_handler; extern const struct xattr_handler ext4_xattr_trusted_handler; extern const struct xattr_handler ext4_xattr_security_handler; +extern const struct xattr_handler ext4_xattr_hurd_handler; #define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c" diff --git a/fs/ext4/xattr_hurd.c b/fs/ext4/xattr_hurd.c new file mode 100644 index 000000000000..8cfa74a56361 --- /dev/null +++ b/fs/ext4/xattr_hurd.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/fs/ext4/xattr_hurd.c + * Handler for extended gnu attributes for the Hurd. 
+ * + * Copyright (C) 2001 by Andreas Gruenbacher, + * Copyright (C) 2020 by Jan (janneke) Nieuwenhuizen, + */ + +#include +#include +#include "ext4.h" +#include "xattr.h" + +static bool +ext4_xattr_hurd_list(struct dentry *dentry) +{ + return test_opt(dentry->d_sb, XATTR_USER); +} + +static int +ext4_xattr_hurd_get(const struct xattr_handler *handler, + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) +{ + if (!test_opt(inode->i_sb, XATTR_USER)) + return -EOPNOTSUPP; + + return ext4_xattr_get(inode, EXT4_XATTR_INDEX_HURD, + name, buffer, size); +} + +static int +ext4_xattr_hurd_set(const struct xattr_handler *handler, + struct dentry *unused, struct inode *inode, + const char *name, const void *value, + size_t size, int flags) +{ + if (!test_opt(inode->i_sb, XATTR_USER)) + return -EOPNOTSUPP; + + return ext4_xattr_set(inode, EXT4_XATTR_INDEX_HURD, + name, value, size, flags); +} + +const struct xattr_handler ext4_xattr_hurd_handler = { + .prefix = XATTR_HURD_PREFIX, + .list = ext4_xattr_hurd_list, + .get = ext4_xattr_hurd_get, + .set = ext4_xattr_hurd_set, +}; diff --git a/fs/file_table.c b/fs/file_table.c index 656647f9575a..65603502fed6 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -230,7 +230,7 @@ struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt, d_set_d_op(path.dentry, &anon_ops); path.mnt = mntget(mnt); d_instantiate(path.dentry, inode); - file = alloc_file(&path, flags, fops); + file = alloc_file(&path, flags | FMODE_NONOTIFY, fops); if (IS_ERR(file)) { ihold(inode); path_put(&path); diff --git a/fs/io-wq.c b/fs/io-wq.c index 0b65a912b036..47c5f3aeb460 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -903,13 +903,15 @@ void io_wq_cancel_all(struct io_wq *wq) struct io_cb_cancel_data { work_cancel_fn *fn; void *data; + int nr_running; + int nr_pending; + bool cancel_all; }; static bool io_wq_worker_cancel(struct io_worker *worker, void *data) { struct io_cb_cancel_data *match = data; unsigned long flags; - bool ret = false; /* * Hold the lock to avoid ->cur_work going out of scope, caller @@ -920,41 +922,69 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data) !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) && match->fn(worker->cur_work, match->data)) { send_sig(SIGINT, worker->task, 1); - ret = true; + match->nr_running++; } spin_unlock_irqrestore(&worker->lock, flags); - return ret; + return match->nr_running && !match->cancel_all; } -static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe, - struct io_cb_cancel_data *match) +static void io_wqe_cancel_pending_work(struct io_wqe *wqe, + struct io_cb_cancel_data *match) { struct io_wq_work_node *node, *prev; struct io_wq_work *work; unsigned long flags; - bool found = false; + +retry: + spin_lock_irqsave(&wqe->lock, flags); + wq_list_for_each(node, prev, &wqe->work_list) { + work = container_of(node, struct io_wq_work, list); + if (!match->fn(work, match->data)) + continue; + + wq_list_del(&wqe->work_list, node, prev); + spin_unlock_irqrestore(&wqe->lock, flags); + io_run_cancel(work, wqe); + match->nr_pending++; + if (!match->cancel_all) + return; + + /* not safe to continue after unlock */ + goto retry; + } + spin_unlock_irqrestore(&wqe->lock, flags); +} + +static void io_wqe_cancel_running_work(struct io_wqe *wqe, + struct io_cb_cancel_data *match) +{ + rcu_read_lock(); + io_wq_for_each_worker(wqe, io_wq_worker_cancel, match); + rcu_read_unlock(); +} + +enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, + void 
*data, bool cancel_all) +{ + struct io_cb_cancel_data match = { + .fn = cancel, + .data = data, + .cancel_all = cancel_all, + }; + int node; /* * First check pending list, if we're lucky we can just remove it * from there. CANCEL_OK means that the work is returned as-new, * no completion will be posted for it. */ - spin_lock_irqsave(&wqe->lock, flags); - wq_list_for_each(node, prev, &wqe->work_list) { - work = container_of(node, struct io_wq_work, list); + for_each_node(node) { + struct io_wqe *wqe = wq->wqes[node]; - if (match->fn(work, match->data)) { - wq_list_del(&wqe->work_list, node, prev); - found = true; - break; - } - } - spin_unlock_irqrestore(&wqe->lock, flags); - - if (found) { - io_run_cancel(work, wqe); - return IO_WQ_CANCEL_OK; + io_wqe_cancel_pending_work(wqe, &match); + if (match.nr_pending && !match.cancel_all) + return IO_WQ_CANCEL_OK; } /* @@ -963,31 +993,19 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe, * as an indication that we attempt to signal cancellation. The * completion will run normally in this case. */ - rcu_read_lock(); - found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match); - rcu_read_unlock(); - return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND; -} - -enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, - void *data) -{ - struct io_cb_cancel_data match = { - .fn = cancel, - .data = data, - }; - enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND; - int node; - for_each_node(node) { struct io_wqe *wqe = wq->wqes[node]; - ret = io_wqe_cancel_work(wqe, &match); - if (ret != IO_WQ_CANCEL_NOTFOUND) - break; + io_wqe_cancel_running_work(wqe, &match); + if (match.nr_running && !match.cancel_all) + return IO_WQ_CANCEL_RUNNING; } - return ret; + if (match.nr_running) + return IO_WQ_CANCEL_RUNNING; + if (match.nr_pending) + return IO_WQ_CANCEL_OK; + return IO_WQ_CANCEL_NOTFOUND; } static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data) @@ -997,21 +1015,7 @@ static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data) enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork) { - return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork); -} - -static bool io_wq_pid_match(struct io_wq_work *work, void *data) -{ - pid_t pid = (pid_t) (unsigned long) data; - - return work->task_pid == pid; -} - -enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid) -{ - void *data = (void *) (unsigned long) pid; - - return io_wq_cancel_cb(wq, io_wq_pid_match, data); + return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false); } struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) diff --git a/fs/io-wq.h b/fs/io-wq.h index 8e138fa88b9f..071f1a997800 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -90,7 +90,6 @@ struct io_wq_work { const struct cred *creds; struct fs_struct *fs; unsigned flags; - pid_t task_pid; }; static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) @@ -125,12 +124,11 @@ static inline bool io_wq_is_hashed(struct io_wq_work *work) void io_wq_cancel_all(struct io_wq *wq); enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork); -enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid); typedef bool (work_cancel_fn)(struct io_wq_work *, void *); enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, - void *data); + void *data, bool cancel_all); struct task_struct *io_wq_get_task(struct io_wq *wq); diff --git a/fs/io_uring.c b/fs/io_uring.c index 
155f3d830ddb..e507737f044e 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -541,6 +541,7 @@ enum { REQ_F_NO_FILE_TABLE_BIT, REQ_F_QUEUE_TIMEOUT_BIT, REQ_F_WORK_INITIALIZED_BIT, + REQ_F_TASK_PINNED_BIT, /* not a real bit, just to check we're not overflowing the space */ __REQ_F_LAST_BIT, @@ -598,6 +599,8 @@ enum { REQ_F_QUEUE_TIMEOUT = BIT(REQ_F_QUEUE_TIMEOUT_BIT), /* io_wq_work is initialized */ REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT), + /* req->task is refcounted */ + REQ_F_TASK_PINNED = BIT(REQ_F_TASK_PINNED_BIT), }; struct async_poll { @@ -887,6 +890,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx, struct io_uring_files_update *ip, unsigned nr_args); static int io_grab_files(struct io_kiocb *req); +static void io_complete_rw_common(struct kiocb *kiocb, long res); static void io_cleanup_req(struct io_kiocb *req); static int io_file_get(struct io_submit_state *state, struct io_kiocb *req, int fd, struct file **out_file, bool fixed); @@ -910,6 +914,21 @@ struct sock *io_uring_get_socket(struct file *file) } EXPORT_SYMBOL(io_uring_get_socket); +static void io_get_req_task(struct io_kiocb *req) +{ + if (req->flags & REQ_F_TASK_PINNED) + return; + get_task_struct(req->task); + req->flags |= REQ_F_TASK_PINNED; +} + +/* not idempotent -- it doesn't clear REQ_F_TASK_PINNED */ +static void __io_put_req_task(struct io_kiocb *req) +{ + if (req->flags & REQ_F_TASK_PINNED) + put_task_struct(req->task); +} + static void io_file_put_work(struct work_struct *work); /* @@ -1045,8 +1064,6 @@ static inline void io_req_work_grab_env(struct io_kiocb *req, } spin_unlock(¤t->fs->lock); } - if (!req->work.task_pid) - req->work.task_pid = task_pid_vnr(current); } static inline void io_req_work_drop_env(struct io_kiocb *req) @@ -1087,6 +1104,7 @@ static inline void io_prep_async_work(struct io_kiocb *req, req->work.flags |= IO_WQ_WORK_UNBOUND; } + io_req_init_async(req); io_req_work_grab_env(req, def); *link = io_prep_linked_timeout(req); @@ -1398,9 +1416,7 @@ static void __io_req_aux_free(struct io_kiocb *req) kfree(req->io); if (req->file) io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE)); - if (req->task) - put_task_struct(req->task); - + __io_put_req_task(req); io_req_work_drop_env(req); } @@ -1727,6 +1743,26 @@ static int io_put_kbuf(struct io_kiocb *req) return cflags; } +static void io_iopoll_queue(struct list_head *again) +{ + struct io_kiocb *req; + + do { + req = list_first_entry(again, struct io_kiocb, list); + list_del(&req->list); + + /* shouldn't happen unless io_uring is dying, cancel reqs */ + if (unlikely(!current->mm)) { + io_complete_rw_common(&req->rw.kiocb, -EAGAIN); + io_put_req(req); + continue; + } + + refcount_inc(&req->refs); + io_queue_async_work(req); + } while (!list_empty(again)); +} + /* * Find and free completed poll iocbs */ @@ -1735,12 +1771,21 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, { struct req_batch rb; struct io_kiocb *req; + LIST_HEAD(again); + + /* order with ->result store in io_complete_rw_iopoll() */ + smp_rmb(); rb.to_free = rb.need_iter = 0; while (!list_empty(done)) { int cflags = 0; req = list_first_entry(done, struct io_kiocb, list); + if (READ_ONCE(req->result) == -EAGAIN) { + req->iopoll_completed = 0; + list_move_tail(&req->list, &again); + continue; + } list_del(&req->list); if (req->flags & REQ_F_BUFFER_SELECTED) @@ -1758,18 +1803,9 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, if (ctx->flags & IORING_SETUP_SQPOLL) 
io_cqring_ev_posted(ctx); io_free_req_many(ctx, &rb); -} -static void io_iopoll_queue(struct list_head *again) -{ - struct io_kiocb *req; - - do { - req = list_first_entry(again, struct io_kiocb, list); - list_del(&req->list); - refcount_inc(&req->refs); - io_queue_async_work(req); - } while (!list_empty(again)); + if (!list_empty(&again)) + io_iopoll_queue(&again); } static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, @@ -1777,7 +1813,6 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, { struct io_kiocb *req, *tmp; LIST_HEAD(done); - LIST_HEAD(again); bool spin; int ret; @@ -1803,13 +1838,6 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, if (!list_empty(&done)) break; - if (req->result == -EAGAIN) { - list_move_tail(&req->list, &again); - continue; - } - if (!list_empty(&again)) - break; - ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin); if (ret < 0) break; @@ -1822,9 +1850,6 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, if (!list_empty(&done)) io_iopoll_complete(ctx, nr_events, &done); - if (!list_empty(&again)) - io_iopoll_queue(&again); - return ret; } @@ -1973,11 +1998,13 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) if (kiocb->ki_flags & IOCB_WRITE) kiocb_end_write(req); - if (res != req->result) + if (res != -EAGAIN && res != req->result) req_set_fail_links(req); - req->result = res; - if (res != -EAGAIN) - WRITE_ONCE(req->iopoll_completed, 1); + + WRITE_ONCE(req->result, res); + /* order with io_poll_complete() checking ->result */ + smp_wmb(); + WRITE_ONCE(req->iopoll_completed, 1); } /* @@ -2650,8 +2677,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock) } } out_free: - kfree(iovec); - req->flags &= ~REQ_F_NEED_CLEANUP; + if (!(req->flags & REQ_F_NEED_CLEANUP)) + kfree(iovec); return ret; } @@ -2773,8 +2800,8 @@ static int io_write(struct io_kiocb *req, bool force_nonblock) } } out_free: - req->flags &= ~REQ_F_NEED_CLEANUP; - kfree(iovec); + if (!(req->flags & REQ_F_NEED_CLEANUP)) + kfree(iovec); return ret; } @@ -4236,6 +4263,28 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, __io_queue_proc(&pt->req->apoll->poll, pt, head); } +static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx) +{ + struct mm_struct *mm = current->mm; + + if (mm) { + kthread_unuse_mm(mm); + mmput(mm); + } +} + +static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx, + struct io_kiocb *req) +{ + if (io_op_defs[req->opcode].needs_mm && !current->mm) { + if (unlikely(!mmget_not_zero(ctx->sqo_mm))) + return -EFAULT; + kthread_use_mm(ctx->sqo_mm); + } + + return 0; +} + static void io_async_task_func(struct callback_head *cb) { struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); @@ -4270,11 +4319,16 @@ static void io_async_task_func(struct callback_head *cb) if (!canceled) { __set_current_state(TASK_RUNNING); + if (io_sq_thread_acquire_mm(ctx, req)) { + io_cqring_add_event(req, -EFAULT); + goto end_req; + } mutex_lock(&ctx->uring_lock); __io_queue_sqe(req, NULL); mutex_unlock(&ctx->uring_lock); } else { io_cqring_ev_posted(ctx); +end_req: req_set_fail_links(req); io_double_put_req(req); } @@ -4366,8 +4420,7 @@ static bool io_arm_poll_handler(struct io_kiocb *req) memcpy(&apoll->work, &req->work, sizeof(req->work)); had_io = req->io != NULL; - get_task_struct(current); - req->task = current; + io_get_req_task(req); req->apoll = apoll; INIT_HLIST_NODE(&req->hash_node); @@ -4555,8 +4608,7 @@ static int 
io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe events = READ_ONCE(sqe->poll_events); poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP; - get_task_struct(current); - req->task = current; + io_get_req_task(req); return 0; } @@ -4772,7 +4824,7 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr) enum io_wq_cancel cancel_ret; int ret = 0; - cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr); + cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false); switch (cancel_ret) { case IO_WQ_CANCEL_OK: ret = 0; @@ -5308,9 +5360,6 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) { const bool in_async = io_wq_current_is_worker(); - if (req->result == -EAGAIN) - return -EAGAIN; - /* workqueue context doesn't hold uring_lock, grab it now */ if (in_async) mutex_lock(&ctx->uring_lock); @@ -5817,17 +5866,14 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, req->flags = 0; /* one is dropped after submission, the other at completion */ refcount_set(&req->refs, 2); - req->task = NULL; + req->task = current; req->result = 0; if (unlikely(req->opcode >= IORING_OP_LAST)) return -EINVAL; - if (io_op_defs[req->opcode].needs_mm && !current->mm) { - if (unlikely(!mmget_not_zero(ctx->sqo_mm))) - return -EFAULT; - kthread_use_mm(ctx->sqo_mm); - } + if (unlikely(io_sq_thread_acquire_mm(ctx, req))) + return -EFAULT; sqe_flags = READ_ONCE(sqe->flags); /* enforce forwards compatibility on users */ @@ -5936,16 +5982,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, return submitted; } -static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx) -{ - struct mm_struct *mm = current->mm; - - if (mm) { - kthread_unuse_mm(mm); - mmput(mm); - } -} - static int io_sq_thread(void *data) { struct io_ring_ctx *ctx = data; @@ -5979,7 +6015,7 @@ static int io_sq_thread(void *data) * If submit got -EBUSY, flag us as needing the application * to enter the kernel to reap and flush events. */ - if (!to_submit || ret == -EBUSY) { + if (!to_submit || ret == -EBUSY || need_resched()) { /* * Drop cur_mm before scheduling, we can't hold it for * long periods (or over schedule()). Do this before @@ -5995,7 +6031,7 @@ static int io_sq_thread(void *data) * more IO, we should wait for the application to * reap events and wake us up. */ - if (!list_empty(&ctx->poll_list) || + if (!list_empty(&ctx->poll_list) || need_resched() || (!time_after(jiffies, timeout) && ret != -EBUSY && !percpu_ref_is_dying(&ctx->refs))) { if (current->task_works) @@ -7331,7 +7367,17 @@ static void io_ring_exit_work(struct work_struct *work) if (ctx->rings) io_cqring_overflow_flush(ctx, true); - wait_for_completion(&ctx->ref_comp); + /* + * If we're doing polled IO and end up having requests being + * submitted async (out-of-line), then completions can come in while + * we're waiting for refs to drop. We need to reap these manually, + * as nobody else will be looking for them. 
+ */ + while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20)) { + io_iopoll_reap_events(ctx); + if (ctx->rings) + io_cqring_overflow_flush(ctx, true); + } io_ring_ctx_free(ctx); } @@ -7365,9 +7411,22 @@ static int io_uring_release(struct inode *inode, struct file *file) return 0; } +static bool io_wq_files_match(struct io_wq_work *work, void *data) +{ + struct files_struct *files = data; + + return work->files == files; +} + static void io_uring_cancel_files(struct io_ring_ctx *ctx, struct files_struct *files) { + if (list_empty_careful(&ctx->inflight_list)) + return; + + /* cancel all at once, should be faster than doing it one by one */ + io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true); + while (!list_empty_careful(&ctx->inflight_list)) { struct io_kiocb *cancel_req = NULL, *req; DEFINE_WAIT(wait); @@ -7423,6 +7482,14 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx, } } +static bool io_cancel_task_cb(struct io_wq_work *work, void *data) +{ + struct io_kiocb *req = container_of(work, struct io_kiocb, work); + struct task_struct *task = data; + + return req->task == task; +} + static int io_uring_flush(struct file *file, void *data) { struct io_ring_ctx *ctx = file->private_data; @@ -7433,7 +7500,7 @@ static int io_uring_flush(struct file *file, void *data) * If the task is going away, cancel work it may have pending */ if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) - io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current)); + io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, current, true); return 0; } diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index a49d0e670ddf..e4944436e733 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1140,6 +1140,7 @@ static journal_t *journal_init_common(struct block_device *bdev, init_waitqueue_head(&journal->j_wait_commit); init_waitqueue_head(&journal->j_wait_updates); init_waitqueue_head(&journal->j_wait_reserved); + mutex_init(&journal->j_abort_mutex); mutex_init(&journal->j_barrier); mutex_init(&journal->j_checkpoint_mutex); spin_lock_init(&journal->j_revoke_lock); @@ -1402,7 +1403,8 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags) printk(KERN_ERR "JBD2: Error %d detected when updating " "journal superblock for %s.\n", ret, journal->j_devname); - jbd2_journal_abort(journal, ret); + if (!is_journal_aborted(journal)) + jbd2_journal_abort(journal, ret); } return ret; @@ -2153,6 +2155,13 @@ void jbd2_journal_abort(journal_t *journal, int errno) { transaction_t *transaction; + /* + * Lock the aborting procedure until everything is done; this avoids + * races with the filesystem's error handling flow (e.g. ext4_abort()) and + * ensures the panic happens after the error info is written into the + * journal's superblock. + */ + mutex_lock(&journal->j_abort_mutex); /* * ESHUTDOWN always takes precedence because a file system check * caused by any other journal abort error is not required after @@ -2167,6 +2176,7 @@ void jbd2_journal_abort(journal_t *journal, int errno) journal->j_errno = errno; jbd2_journal_update_sb_errno(journal); } + mutex_unlock(&journal->j_abort_mutex); return; } @@ -2188,10 +2198,7 @@ * layer could realise that a filesystem check is needed.
*/ jbd2_journal_update_sb_errno(journal); - - write_lock(&journal->j_state_lock); - journal->j_flags |= JBD2_REC_ERR; - write_unlock(&journal->j_state_lock); + mutex_unlock(&journal->j_abort_mutex); } /** diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h index 0637271f3770..8ff4d1a1e774 100644 --- a/fs/jffs2/nodelist.h +++ b/fs/jffs2/nodelist.h @@ -259,7 +259,7 @@ struct jffs2_full_dirent uint32_t ino; /* == zero for unlink */ unsigned int nhash; unsigned char type; - unsigned char name[0]; + unsigned char name[]; }; /* diff --git a/fs/jffs2/summary.h b/fs/jffs2/summary.h index 60207a2ae952..e4131cb1f1d4 100644 --- a/fs/jffs2/summary.h +++ b/fs/jffs2/summary.h @@ -61,7 +61,7 @@ struct jffs2_sum_dirent_flash jint32_t ino; /* == zero for unlink */ uint8_t nsize; /* dirent name size */ uint8_t type; /* dirent type */ - uint8_t name[0]; /* dirent name */ + uint8_t name[]; /* dirent name */ } __attribute__((packed)); struct jffs2_sum_xattr_flash @@ -117,7 +117,7 @@ struct jffs2_sum_dirent_mem jint32_t ino; /* == zero for unlink */ uint8_t nsize; /* dirent name size */ uint8_t type; /* dirent type */ - uint8_t name[0]; /* dirent name */ + uint8_t name[]; /* dirent name */ } __attribute__((packed)); struct jffs2_sum_xattr_mem diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 1b79dd5cf661..3d113cf8908a 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -267,8 +267,6 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq) { struct inode *inode = dreq->inode; - inode_dio_end(inode); - if (dreq->iocb) { long res = (long) dreq->error; if (dreq->count != 0) { @@ -280,7 +278,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq) complete(&dreq->completion); + igrab(inode); nfs_direct_req_release(dreq); + inode_dio_end(inode); + iput(inode); } static void nfs_direct_read_completion(struct nfs_pgio_header *hdr) @@ -410,8 +411,10 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, * generic layer handle the completion. */ if (requested_bytes == 0) { - inode_dio_end(inode); + igrab(inode); nfs_direct_req_release(dreq); + inode_dio_end(inode); + iput(inode); return result < 0 ? result : -EIO; } @@ -864,8 +867,10 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, * generic layer handle the completion. */ if (requested_bytes == 0) { - inode_dio_end(inode); + igrab(inode); nfs_direct_req_release(dreq); + inode_dio_end(inode); + iput(inode); return result < 0 ? 
result : -EIO; } diff --git a/fs/nfs/file.c b/fs/nfs/file.c index f96367a2463e..ccd6c1637b27 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -83,6 +83,7 @@ nfs_file_release(struct inode *inode, struct file *filp) dprintk("NFS: release(%pD2)\n", filp); nfs_inc_stats(inode, NFSIOS_VFSRELEASE); + inode_dio_wait(inode); nfs_file_clear_open_context(filp); return 0; } diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 7d399f72ebbb..de03e440b7ee 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -907,9 +907,8 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio, goto out_mds; /* Use a direct mapping of ds_idx to pgio mirror_idx */ - if (WARN_ON_ONCE(pgio->pg_mirror_count != - FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))) - goto out_mds; + if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)) + goto out_eagain; for (i = 0; i < pgio->pg_mirror_count; i++) { mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i); @@ -931,7 +930,10 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio, (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) pgio->pg_maxretrans = io_maxretrans; return; - +out_eagain: + pnfs_generic_pg_cleanup(pgio); + pgio->pg_error = -EAGAIN; + return; out_mds: trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode, 0, NFS4_MAX_UINT64, IOMODE_RW, @@ -941,6 +943,7 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio, pgio->pg_lseg = NULL; pgio->pg_maxretrans = 0; nfs_pageio_reset_write_mds(pgio); + pgio->pg_error = -EAGAIN; } static unsigned int diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c index a3ab6e219061..873342308dc0 100644 --- a/fs/nfs/nfs4namespace.c +++ b/fs/nfs/nfs4namespace.c @@ -308,6 +308,7 @@ static int try_location(struct fs_context *fc, if (IS_ERR(export_path)) return PTR_ERR(export_path); + kfree(ctx->nfs_server.export_path); ctx->nfs_server.export_path = export_path; source = kmalloc(len + 1 + ctx->nfs_server.export_path_len + 1, diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 152a0fc4e905..751bc4dc7466 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -689,6 +689,12 @@ static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res, &ocfs2_nfs_sync_lops, osb); } +static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb) +{ + ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb); + init_rwsem(&osb->nfs_sync_rwlock); +} + void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb) { struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres; @@ -2855,6 +2861,11 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex) if (ocfs2_is_hard_readonly(osb)) return -EROFS; + if (ex) + down_write(&osb->nfs_sync_rwlock); + else + down_read(&osb->nfs_sync_rwlock); + if (ocfs2_mount_local(osb)) return 0; @@ -2873,6 +2884,10 @@ void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex) if (!ocfs2_mount_local(osb)) ocfs2_cluster_unlock(osb, lockres, ex ? 
LKM_EXMODE : LKM_PRMODE); + if (ex) + up_write(&osb->nfs_sync_rwlock); + else + up_read(&osb->nfs_sync_rwlock); } int ocfs2_trim_fs_lock(struct ocfs2_super *osb, @@ -3340,7 +3355,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb) local: ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb); ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb); - ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb); + ocfs2_nfs_sync_lock_init(osb); ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb); osb->cconn = conn; diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index ee5d98516212..2dd71d626196 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -395,6 +395,7 @@ struct ocfs2_super struct ocfs2_lock_res osb_super_lockres; struct ocfs2_lock_res osb_rename_lockres; struct ocfs2_lock_res osb_nfs_sync_lockres; + struct rw_semaphore nfs_sync_rwlock; struct ocfs2_lock_res osb_trim_fs_lockres; struct mutex obs_trim_fs_mutex; struct ocfs2_dlm_debug *osb_dlm_debug; diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 0dd8c41bafd4..19137c6d087b 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -290,7 +290,7 @@ #define OCFS2_MAX_SLOTS 255 /* Slot map indicator for an empty slot */ -#define OCFS2_INVALID_SLOT -1 +#define OCFS2_INVALID_SLOT ((u16)-1) #define OCFS2_VOL_UUID_LEN 16 #define OCFS2_MAX_VOL_LABEL_LEN 64 @@ -326,8 +326,8 @@ struct ocfs2_system_inode_info { enum { BAD_BLOCK_SYSTEM_INODE = 0, GLOBAL_INODE_ALLOC_SYSTEM_INODE, +#define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE, -#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE HEARTBEAT_SYSTEM_INODE, GLOBAL_BITMAP_SYSTEM_INODE, USER_QUOTA_SYSTEM_INODE, diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 4836becb7578..45745cc3408a 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -2825,9 +2825,12 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) goto bail; } - inode_alloc_inode = - ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE, - suballoc_slot); + if (suballoc_slot == (u16)OCFS2_INVALID_SLOT) + inode_alloc_inode = ocfs2_get_system_file_inode(osb, + GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot); + else + inode_alloc_inode = ocfs2_get_system_file_inode(osb, + INODE_ALLOC_SYSTEM_INODE, suballoc_slot); if (!inode_alloc_inode) { /* the error code could be inaccurate, but we are not able to * get the correct one. */ diff --git a/fs/proc/bootconfig.c b/fs/proc/bootconfig.c index 9955d75c0585..ad31ec4ad627 100644 --- a/fs/proc/bootconfig.c +++ b/fs/proc/bootconfig.c @@ -26,8 +26,9 @@ static int boot_config_proc_show(struct seq_file *m, void *v) static int __init copy_xbc_key_value_list(char *dst, size_t size) { struct xbc_node *leaf, *vnode; - const char *val; char *key, *end = dst + size; + const char *val; + char q; int ret = 0; key = kzalloc(XBC_KEYLEN_MAX, GFP_KERNEL); @@ -41,16 +42,20 @@ static int __init copy_xbc_key_value_list(char *dst, size_t size) break; dst += ret; vnode = xbc_node_get_child(leaf); - if (vnode && xbc_node_is_array(vnode)) { + if (vnode) { xbc_array_for_each_value(vnode, val) { - ret = snprintf(dst, rest(dst, end), "\"%s\"%s", - val, vnode->next ? ", " : "\n"); + if (strchr(val, '"')) + q = '\''; + else + q = '"'; + ret = snprintf(dst, rest(dst, end), "%c%s%c%s", + q, val, q, vnode->next ? 
", " : "\n"); if (ret < 0) goto out; dst += ret; } } else { - ret = snprintf(dst, rest(dst, end), "\"%s\"\n", val); + ret = snprintf(dst, rest(dst, end), "\"\"\n"); if (ret < 0) break; dst += ret; diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 8ba492d44e68..e502414b3556 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -512,7 +512,8 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) * Using bounce buffer to bypass the * hardened user copy kernel text checks. */ - if (probe_kernel_read(buf, (void *) start, tsz)) { + if (copy_from_kernel_nofault(buf, (void *)start, + tsz)) { if (clear_user(buffer, tsz)) { ret = -EFAULT; goto out; diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h index 7187bd1a30ea..8d64edb80ebf 100644 --- a/fs/squashfs/squashfs_fs.h +++ b/fs/squashfs/squashfs_fs.h @@ -262,7 +262,7 @@ struct squashfs_dir_index { __le32 index; __le32 start_block; __le32 size; - unsigned char name[0]; + unsigned char name[]; }; struct squashfs_base_inode { @@ -327,7 +327,7 @@ struct squashfs_symlink_inode { __le32 inode_number; __le32 nlink; __le32 symlink_size; - char symlink[0]; + char symlink[]; }; struct squashfs_reg_inode { @@ -341,7 +341,7 @@ struct squashfs_reg_inode { __le32 fragment; __le32 offset; __le32 file_size; - __le16 block_list[0]; + __le16 block_list[]; }; struct squashfs_lreg_inode { @@ -358,7 +358,7 @@ struct squashfs_lreg_inode { __le32 fragment; __le32 offset; __le32 xattr; - __le16 block_list[0]; + __le16 block_list[]; }; struct squashfs_dir_inode { @@ -389,7 +389,7 @@ struct squashfs_ldir_inode { __le16 i_count; __le16 offset; __le32 xattr; - struct squashfs_dir_index index[0]; + struct squashfs_dir_index index[]; }; union squashfs_inode { @@ -410,7 +410,7 @@ struct squashfs_dir_entry { __le16 inode_number; __le16 type; __le16 size; - char name[0]; + char name[]; }; struct squashfs_dir_header { @@ -428,12 +428,12 @@ struct squashfs_fragment_entry { struct squashfs_xattr_entry { __le16 type; __le16 size; - char data[0]; + char data[]; }; struct squashfs_xattr_val { __le32 vsize; - char value[0]; + char value[]; }; struct squashfs_xattr_id { diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index 907fa5d16494..4a674db4e1fa 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h @@ -2,6 +2,11 @@ #ifndef _ASM_GENERIC_CACHEFLUSH_H #define _ASM_GENERIC_CACHEFLUSH_H +struct mm_struct; +struct vm_area_struct; +struct page; +struct address_space; + /* * The cache doesn't need to be flushed when TLB entries change when * the cache is mapped to physical memory, not virtual memory diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h index 40f85decc2ee..8e1e6244a89d 100644 --- a/include/asm-generic/hugetlb.h +++ b/include/asm-generic/hugetlb.h @@ -122,7 +122,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, #ifndef __HAVE_ARCH_HUGE_PTEP_GET static inline pte_t huge_ptep_get(pte_t *ptep) { - return READ_ONCE(*ptep); + return ptep_get(ptep); } #endif diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h index 27bdd273fc4e..77941efb5426 100644 --- a/include/drm/drm_displayid.h +++ b/include/drm/drm_displayid.h @@ -89,7 +89,7 @@ struct displayid_detailed_timings_1 { struct displayid_detailed_timing_block { struct displayid_block base; - struct displayid_detailed_timings_1 timings[0]; + struct displayid_detailed_timings_1 timings[]; }; #define for_each_displayid_db(displayid, block, idx, length) \ diff 
--git a/include/keys/encrypted-type.h b/include/keys/encrypted-type.h index 9e9ccb20d586..38afb341c3f2 100644 --- a/include/keys/encrypted-type.h +++ b/include/keys/encrypted-type.h @@ -27,7 +27,7 @@ struct encrypted_key_payload { unsigned short payload_datalen; /* payload data length */ unsigned short encrypted_key_format; /* encrypted key format */ u8 *decrypted_data; /* decrypted data */ - u8 payload_data[0]; /* payload data + datablob + hmac */ + u8 payload_data[]; /* payload data + datablob + hmac */ }; extern struct key_type key_type_encrypted; diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h index a183278c3e9e..2b0b15a71228 100644 --- a/include/keys/rxrpc-type.h +++ b/include/keys/rxrpc-type.h @@ -28,7 +28,7 @@ struct rxkad_key { u8 primary_flag; /* T if key for primary cell for this user */ u16 ticket_len; /* length of ticket[] */ u8 session_key[8]; /* DES session key */ - u8 ticket[0]; /* the encrypted ticket */ + u8 ticket[]; /* the encrypted ticket */ }; /* @@ -100,7 +100,7 @@ struct rxrpc_key_data_v1 { u32 expiry; /* time_t */ u32 kvno; u8 session_key[8]; - u8 ticket[0]; + u8 ticket[]; }; /* diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h index 2c4927bf7b8d..fd525c71d676 100644 --- a/include/linux/atomic-fallback.h +++ b/include/linux/atomic-fallback.h @@ -77,6 +77,9 @@ #endif /* cmpxchg64_relaxed */ +#define arch_atomic_read atomic_read +#define arch_atomic_read_acquire atomic_read_acquire + #ifndef atomic_read_acquire static __always_inline int atomic_read_acquire(const atomic_t *v) @@ -86,6 +89,9 @@ atomic_read_acquire(const atomic_t *v) #define atomic_read_acquire atomic_read_acquire #endif +#define arch_atomic_set atomic_set +#define arch_atomic_set_release atomic_set_release + #ifndef atomic_set_release static __always_inline void atomic_set_release(atomic_t *v, int i) @@ -95,6 +101,13 @@ atomic_set_release(atomic_t *v, int i) #define atomic_set_release atomic_set_release #endif +#define arch_atomic_add atomic_add + +#define arch_atomic_add_return atomic_add_return +#define arch_atomic_add_return_acquire atomic_add_return_acquire +#define arch_atomic_add_return_release atomic_add_return_release +#define arch_atomic_add_return_relaxed atomic_add_return_relaxed + #ifndef atomic_add_return_relaxed #define atomic_add_return_acquire atomic_add_return #define atomic_add_return_release atomic_add_return @@ -137,6 +150,11 @@ atomic_add_return(int i, atomic_t *v) #endif /* atomic_add_return_relaxed */ +#define arch_atomic_fetch_add atomic_fetch_add +#define arch_atomic_fetch_add_acquire atomic_fetch_add_acquire +#define arch_atomic_fetch_add_release atomic_fetch_add_release +#define arch_atomic_fetch_add_relaxed atomic_fetch_add_relaxed + #ifndef atomic_fetch_add_relaxed #define atomic_fetch_add_acquire atomic_fetch_add #define atomic_fetch_add_release atomic_fetch_add @@ -179,6 +197,13 @@ atomic_fetch_add(int i, atomic_t *v) #endif /* atomic_fetch_add_relaxed */ +#define arch_atomic_sub atomic_sub + +#define arch_atomic_sub_return atomic_sub_return +#define arch_atomic_sub_return_acquire atomic_sub_return_acquire +#define arch_atomic_sub_return_release atomic_sub_return_release +#define arch_atomic_sub_return_relaxed atomic_sub_return_relaxed + #ifndef atomic_sub_return_relaxed #define atomic_sub_return_acquire atomic_sub_return #define atomic_sub_return_release atomic_sub_return @@ -221,6 +246,11 @@ atomic_sub_return(int i, atomic_t *v) #endif /* atomic_sub_return_relaxed */ +#define arch_atomic_fetch_sub atomic_fetch_sub +#define 
arch_atomic_fetch_sub_acquire atomic_fetch_sub_acquire +#define arch_atomic_fetch_sub_release atomic_fetch_sub_release +#define arch_atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed + #ifndef atomic_fetch_sub_relaxed #define atomic_fetch_sub_acquire atomic_fetch_sub #define atomic_fetch_sub_release atomic_fetch_sub @@ -263,6 +293,8 @@ atomic_fetch_sub(int i, atomic_t *v) #endif /* atomic_fetch_sub_relaxed */ +#define arch_atomic_inc atomic_inc + #ifndef atomic_inc static __always_inline void atomic_inc(atomic_t *v) @@ -272,6 +304,11 @@ atomic_inc(atomic_t *v) #define atomic_inc atomic_inc #endif +#define arch_atomic_inc_return atomic_inc_return +#define arch_atomic_inc_return_acquire atomic_inc_return_acquire +#define arch_atomic_inc_return_release atomic_inc_return_release +#define arch_atomic_inc_return_relaxed atomic_inc_return_relaxed + #ifndef atomic_inc_return_relaxed #ifdef atomic_inc_return #define atomic_inc_return_acquire atomic_inc_return @@ -353,6 +390,11 @@ atomic_inc_return(atomic_t *v) #endif /* atomic_inc_return_relaxed */ +#define arch_atomic_fetch_inc atomic_fetch_inc +#define arch_atomic_fetch_inc_acquire atomic_fetch_inc_acquire +#define arch_atomic_fetch_inc_release atomic_fetch_inc_release +#define arch_atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed + #ifndef atomic_fetch_inc_relaxed #ifdef atomic_fetch_inc #define atomic_fetch_inc_acquire atomic_fetch_inc @@ -434,6 +476,8 @@ atomic_fetch_inc(atomic_t *v) #endif /* atomic_fetch_inc_relaxed */ +#define arch_atomic_dec atomic_dec + #ifndef atomic_dec static __always_inline void atomic_dec(atomic_t *v) @@ -443,6 +487,11 @@ atomic_dec(atomic_t *v) #define atomic_dec atomic_dec #endif +#define arch_atomic_dec_return atomic_dec_return +#define arch_atomic_dec_return_acquire atomic_dec_return_acquire +#define arch_atomic_dec_return_release atomic_dec_return_release +#define arch_atomic_dec_return_relaxed atomic_dec_return_relaxed + #ifndef atomic_dec_return_relaxed #ifdef atomic_dec_return #define atomic_dec_return_acquire atomic_dec_return @@ -524,6 +573,11 @@ atomic_dec_return(atomic_t *v) #endif /* atomic_dec_return_relaxed */ +#define arch_atomic_fetch_dec atomic_fetch_dec +#define arch_atomic_fetch_dec_acquire atomic_fetch_dec_acquire +#define arch_atomic_fetch_dec_release atomic_fetch_dec_release +#define arch_atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed + #ifndef atomic_fetch_dec_relaxed #ifdef atomic_fetch_dec #define atomic_fetch_dec_acquire atomic_fetch_dec @@ -605,6 +659,13 @@ atomic_fetch_dec(atomic_t *v) #endif /* atomic_fetch_dec_relaxed */ +#define arch_atomic_and atomic_and + +#define arch_atomic_fetch_and atomic_fetch_and +#define arch_atomic_fetch_and_acquire atomic_fetch_and_acquire +#define arch_atomic_fetch_and_release atomic_fetch_and_release +#define arch_atomic_fetch_and_relaxed atomic_fetch_and_relaxed + #ifndef atomic_fetch_and_relaxed #define atomic_fetch_and_acquire atomic_fetch_and #define atomic_fetch_and_release atomic_fetch_and @@ -647,6 +708,8 @@ atomic_fetch_and(int i, atomic_t *v) #endif /* atomic_fetch_and_relaxed */ +#define arch_atomic_andnot atomic_andnot + #ifndef atomic_andnot static __always_inline void atomic_andnot(int i, atomic_t *v) @@ -656,6 +719,11 @@ atomic_andnot(int i, atomic_t *v) #define atomic_andnot atomic_andnot #endif +#define arch_atomic_fetch_andnot atomic_fetch_andnot +#define arch_atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire +#define arch_atomic_fetch_andnot_release atomic_fetch_andnot_release +#define arch_atomic_fetch_andnot_relaxed 
atomic_fetch_andnot_relaxed + #ifndef atomic_fetch_andnot_relaxed #ifdef atomic_fetch_andnot #define atomic_fetch_andnot_acquire atomic_fetch_andnot @@ -737,6 +805,13 @@ atomic_fetch_andnot(int i, atomic_t *v) #endif /* atomic_fetch_andnot_relaxed */ +#define arch_atomic_or atomic_or + +#define arch_atomic_fetch_or atomic_fetch_or +#define arch_atomic_fetch_or_acquire atomic_fetch_or_acquire +#define arch_atomic_fetch_or_release atomic_fetch_or_release +#define arch_atomic_fetch_or_relaxed atomic_fetch_or_relaxed + #ifndef atomic_fetch_or_relaxed #define atomic_fetch_or_acquire atomic_fetch_or #define atomic_fetch_or_release atomic_fetch_or @@ -779,6 +854,13 @@ atomic_fetch_or(int i, atomic_t *v) #endif /* atomic_fetch_or_relaxed */ +#define arch_atomic_xor atomic_xor + +#define arch_atomic_fetch_xor atomic_fetch_xor +#define arch_atomic_fetch_xor_acquire atomic_fetch_xor_acquire +#define arch_atomic_fetch_xor_release atomic_fetch_xor_release +#define arch_atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed + #ifndef atomic_fetch_xor_relaxed #define atomic_fetch_xor_acquire atomic_fetch_xor #define atomic_fetch_xor_release atomic_fetch_xor @@ -821,6 +903,11 @@ atomic_fetch_xor(int i, atomic_t *v) #endif /* atomic_fetch_xor_relaxed */ +#define arch_atomic_xchg atomic_xchg +#define arch_atomic_xchg_acquire atomic_xchg_acquire +#define arch_atomic_xchg_release atomic_xchg_release +#define arch_atomic_xchg_relaxed atomic_xchg_relaxed + #ifndef atomic_xchg_relaxed #define atomic_xchg_acquire atomic_xchg #define atomic_xchg_release atomic_xchg @@ -863,6 +950,11 @@ atomic_xchg(atomic_t *v, int i) #endif /* atomic_xchg_relaxed */ +#define arch_atomic_cmpxchg atomic_cmpxchg +#define arch_atomic_cmpxchg_acquire atomic_cmpxchg_acquire +#define arch_atomic_cmpxchg_release atomic_cmpxchg_release +#define arch_atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed + #ifndef atomic_cmpxchg_relaxed #define atomic_cmpxchg_acquire atomic_cmpxchg #define atomic_cmpxchg_release atomic_cmpxchg @@ -905,6 +997,11 @@ atomic_cmpxchg(atomic_t *v, int old, int new) #endif /* atomic_cmpxchg_relaxed */ +#define arch_atomic_try_cmpxchg atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire +#define arch_atomic_try_cmpxchg_release atomic_try_cmpxchg_release +#define arch_atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed + #ifndef atomic_try_cmpxchg_relaxed #ifdef atomic_try_cmpxchg #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg @@ -1002,6 +1099,8 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) #endif /* atomic_try_cmpxchg_relaxed */ +#define arch_atomic_sub_and_test atomic_sub_and_test + #ifndef atomic_sub_and_test /** * atomic_sub_and_test - subtract value from variable and test result @@ -1020,6 +1119,8 @@ atomic_sub_and_test(int i, atomic_t *v) #define atomic_sub_and_test atomic_sub_and_test #endif +#define arch_atomic_dec_and_test atomic_dec_and_test + #ifndef atomic_dec_and_test /** * atomic_dec_and_test - decrement and test @@ -1037,6 +1138,8 @@ atomic_dec_and_test(atomic_t *v) #define atomic_dec_and_test atomic_dec_and_test #endif +#define arch_atomic_inc_and_test atomic_inc_and_test + #ifndef atomic_inc_and_test /** * atomic_inc_and_test - increment and test @@ -1054,6 +1157,8 @@ atomic_inc_and_test(atomic_t *v) #define atomic_inc_and_test atomic_inc_and_test #endif +#define arch_atomic_add_negative atomic_add_negative + #ifndef atomic_add_negative /** * atomic_add_negative - add and test if negative @@ -1072,6 +1177,8 @@ atomic_add_negative(int i, atomic_t *v) #define 
atomic_add_negative atomic_add_negative #endif +#define arch_atomic_fetch_add_unless atomic_fetch_add_unless + #ifndef atomic_fetch_add_unless /** * atomic_fetch_add_unless - add unless the number is already a given value @@ -1097,6 +1204,8 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u) #define atomic_fetch_add_unless atomic_fetch_add_unless #endif +#define arch_atomic_add_unless atomic_add_unless + #ifndef atomic_add_unless /** * atomic_add_unless - add unless the number is already a given value @@ -1115,6 +1224,8 @@ atomic_add_unless(atomic_t *v, int a, int u) #define atomic_add_unless atomic_add_unless #endif +#define arch_atomic_inc_not_zero atomic_inc_not_zero + #ifndef atomic_inc_not_zero /** * atomic_inc_not_zero - increment unless the number is zero @@ -1131,6 +1242,8 @@ atomic_inc_not_zero(atomic_t *v) #define atomic_inc_not_zero atomic_inc_not_zero #endif +#define arch_atomic_inc_unless_negative atomic_inc_unless_negative + #ifndef atomic_inc_unless_negative static __always_inline bool atomic_inc_unless_negative(atomic_t *v) @@ -1147,6 +1260,8 @@ atomic_inc_unless_negative(atomic_t *v) #define atomic_inc_unless_negative atomic_inc_unless_negative #endif +#define arch_atomic_dec_unless_positive atomic_dec_unless_positive + #ifndef atomic_dec_unless_positive static __always_inline bool atomic_dec_unless_positive(atomic_t *v) @@ -1163,6 +1278,8 @@ atomic_dec_unless_positive(atomic_t *v) #define atomic_dec_unless_positive atomic_dec_unless_positive #endif +#define arch_atomic_dec_if_positive atomic_dec_if_positive + #ifndef atomic_dec_if_positive static __always_inline int atomic_dec_if_positive(atomic_t *v) @@ -1184,6 +1301,9 @@ atomic_dec_if_positive(atomic_t *v) #include #endif +#define arch_atomic64_read atomic64_read +#define arch_atomic64_read_acquire atomic64_read_acquire + #ifndef atomic64_read_acquire static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) @@ -1193,6 +1313,9 @@ atomic64_read_acquire(const atomic64_t *v) #define atomic64_read_acquire atomic64_read_acquire #endif +#define arch_atomic64_set atomic64_set +#define arch_atomic64_set_release atomic64_set_release + #ifndef atomic64_set_release static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) @@ -1202,6 +1325,13 @@ atomic64_set_release(atomic64_t *v, s64 i) #define atomic64_set_release atomic64_set_release #endif +#define arch_atomic64_add atomic64_add + +#define arch_atomic64_add_return atomic64_add_return +#define arch_atomic64_add_return_acquire atomic64_add_return_acquire +#define arch_atomic64_add_return_release atomic64_add_return_release +#define arch_atomic64_add_return_relaxed atomic64_add_return_relaxed + #ifndef atomic64_add_return_relaxed #define atomic64_add_return_acquire atomic64_add_return #define atomic64_add_return_release atomic64_add_return @@ -1244,6 +1374,11 @@ atomic64_add_return(s64 i, atomic64_t *v) #endif /* atomic64_add_return_relaxed */ +#define arch_atomic64_fetch_add atomic64_fetch_add +#define arch_atomic64_fetch_add_acquire atomic64_fetch_add_acquire +#define arch_atomic64_fetch_add_release atomic64_fetch_add_release +#define arch_atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed + #ifndef atomic64_fetch_add_relaxed #define atomic64_fetch_add_acquire atomic64_fetch_add #define atomic64_fetch_add_release atomic64_fetch_add @@ -1286,6 +1421,13 @@ atomic64_fetch_add(s64 i, atomic64_t *v) #endif /* atomic64_fetch_add_relaxed */ +#define arch_atomic64_sub atomic64_sub + +#define arch_atomic64_sub_return atomic64_sub_return +#define 
arch_atomic64_sub_return_acquire atomic64_sub_return_acquire +#define arch_atomic64_sub_return_release atomic64_sub_return_release +#define arch_atomic64_sub_return_relaxed atomic64_sub_return_relaxed + #ifndef atomic64_sub_return_relaxed #define atomic64_sub_return_acquire atomic64_sub_return #define atomic64_sub_return_release atomic64_sub_return @@ -1328,6 +1470,11 @@ atomic64_sub_return(s64 i, atomic64_t *v) #endif /* atomic64_sub_return_relaxed */ +#define arch_atomic64_fetch_sub atomic64_fetch_sub +#define arch_atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire +#define arch_atomic64_fetch_sub_release atomic64_fetch_sub_release +#define arch_atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed + #ifndef atomic64_fetch_sub_relaxed #define atomic64_fetch_sub_acquire atomic64_fetch_sub #define atomic64_fetch_sub_release atomic64_fetch_sub @@ -1370,6 +1517,8 @@ atomic64_fetch_sub(s64 i, atomic64_t *v) #endif /* atomic64_fetch_sub_relaxed */ +#define arch_atomic64_inc atomic64_inc + #ifndef atomic64_inc static __always_inline void atomic64_inc(atomic64_t *v) @@ -1379,6 +1528,11 @@ atomic64_inc(atomic64_t *v) #define atomic64_inc atomic64_inc #endif +#define arch_atomic64_inc_return atomic64_inc_return +#define arch_atomic64_inc_return_acquire atomic64_inc_return_acquire +#define arch_atomic64_inc_return_release atomic64_inc_return_release +#define arch_atomic64_inc_return_relaxed atomic64_inc_return_relaxed + #ifndef atomic64_inc_return_relaxed #ifdef atomic64_inc_return #define atomic64_inc_return_acquire atomic64_inc_return @@ -1460,6 +1614,11 @@ atomic64_inc_return(atomic64_t *v) #endif /* atomic64_inc_return_relaxed */ +#define arch_atomic64_fetch_inc atomic64_fetch_inc +#define arch_atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire +#define arch_atomic64_fetch_inc_release atomic64_fetch_inc_release +#define arch_atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed + #ifndef atomic64_fetch_inc_relaxed #ifdef atomic64_fetch_inc #define atomic64_fetch_inc_acquire atomic64_fetch_inc @@ -1541,6 +1700,8 @@ atomic64_fetch_inc(atomic64_t *v) #endif /* atomic64_fetch_inc_relaxed */ +#define arch_atomic64_dec atomic64_dec + #ifndef atomic64_dec static __always_inline void atomic64_dec(atomic64_t *v) @@ -1550,6 +1711,11 @@ atomic64_dec(atomic64_t *v) #define atomic64_dec atomic64_dec #endif +#define arch_atomic64_dec_return atomic64_dec_return +#define arch_atomic64_dec_return_acquire atomic64_dec_return_acquire +#define arch_atomic64_dec_return_release atomic64_dec_return_release +#define arch_atomic64_dec_return_relaxed atomic64_dec_return_relaxed + #ifndef atomic64_dec_return_relaxed #ifdef atomic64_dec_return #define atomic64_dec_return_acquire atomic64_dec_return @@ -1631,6 +1797,11 @@ atomic64_dec_return(atomic64_t *v) #endif /* atomic64_dec_return_relaxed */ +#define arch_atomic64_fetch_dec atomic64_fetch_dec +#define arch_atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire +#define arch_atomic64_fetch_dec_release atomic64_fetch_dec_release +#define arch_atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed + #ifndef atomic64_fetch_dec_relaxed #ifdef atomic64_fetch_dec #define atomic64_fetch_dec_acquire atomic64_fetch_dec @@ -1712,6 +1883,13 @@ atomic64_fetch_dec(atomic64_t *v) #endif /* atomic64_fetch_dec_relaxed */ +#define arch_atomic64_and atomic64_and + +#define arch_atomic64_fetch_and atomic64_fetch_and +#define arch_atomic64_fetch_and_acquire atomic64_fetch_and_acquire +#define arch_atomic64_fetch_and_release atomic64_fetch_and_release +#define 
arch_atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed + #ifndef atomic64_fetch_and_relaxed #define atomic64_fetch_and_acquire atomic64_fetch_and #define atomic64_fetch_and_release atomic64_fetch_and @@ -1754,6 +1932,8 @@ atomic64_fetch_and(s64 i, atomic64_t *v) #endif /* atomic64_fetch_and_relaxed */ +#define arch_atomic64_andnot atomic64_andnot + #ifndef atomic64_andnot static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) @@ -1763,6 +1943,11 @@ atomic64_andnot(s64 i, atomic64_t *v) #define atomic64_andnot atomic64_andnot #endif +#define arch_atomic64_fetch_andnot atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire +#define arch_atomic64_fetch_andnot_release atomic64_fetch_andnot_release +#define arch_atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed + #ifndef atomic64_fetch_andnot_relaxed #ifdef atomic64_fetch_andnot #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot @@ -1844,6 +2029,13 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #endif /* atomic64_fetch_andnot_relaxed */ +#define arch_atomic64_or atomic64_or + +#define arch_atomic64_fetch_or atomic64_fetch_or +#define arch_atomic64_fetch_or_acquire atomic64_fetch_or_acquire +#define arch_atomic64_fetch_or_release atomic64_fetch_or_release +#define arch_atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed + #ifndef atomic64_fetch_or_relaxed #define atomic64_fetch_or_acquire atomic64_fetch_or #define atomic64_fetch_or_release atomic64_fetch_or @@ -1886,6 +2078,13 @@ atomic64_fetch_or(s64 i, atomic64_t *v) #endif /* atomic64_fetch_or_relaxed */ +#define arch_atomic64_xor atomic64_xor + +#define arch_atomic64_fetch_xor atomic64_fetch_xor +#define arch_atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire +#define arch_atomic64_fetch_xor_release atomic64_fetch_xor_release +#define arch_atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed + #ifndef atomic64_fetch_xor_relaxed #define atomic64_fetch_xor_acquire atomic64_fetch_xor #define atomic64_fetch_xor_release atomic64_fetch_xor @@ -1928,6 +2127,11 @@ atomic64_fetch_xor(s64 i, atomic64_t *v) #endif /* atomic64_fetch_xor_relaxed */ +#define arch_atomic64_xchg atomic64_xchg +#define arch_atomic64_xchg_acquire atomic64_xchg_acquire +#define arch_atomic64_xchg_release atomic64_xchg_release +#define arch_atomic64_xchg_relaxed atomic64_xchg_relaxed + #ifndef atomic64_xchg_relaxed #define atomic64_xchg_acquire atomic64_xchg #define atomic64_xchg_release atomic64_xchg @@ -1970,6 +2174,11 @@ atomic64_xchg(atomic64_t *v, s64 i) #endif /* atomic64_xchg_relaxed */ +#define arch_atomic64_cmpxchg atomic64_cmpxchg +#define arch_atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire +#define arch_atomic64_cmpxchg_release atomic64_cmpxchg_release +#define arch_atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed + #ifndef atomic64_cmpxchg_relaxed #define atomic64_cmpxchg_acquire atomic64_cmpxchg #define atomic64_cmpxchg_release atomic64_cmpxchg @@ -2012,6 +2221,11 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) #endif /* atomic64_cmpxchg_relaxed */ +#define arch_atomic64_try_cmpxchg atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire +#define arch_atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release +#define arch_atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed + #ifndef atomic64_try_cmpxchg_relaxed #ifdef atomic64_try_cmpxchg #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg @@ -2109,6 +2323,8 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) #endif /* 
atomic64_try_cmpxchg_relaxed */ +#define arch_atomic64_sub_and_test atomic64_sub_and_test + #ifndef atomic64_sub_and_test /** * atomic64_sub_and_test - subtract value from variable and test result @@ -2127,6 +2343,8 @@ atomic64_sub_and_test(s64 i, atomic64_t *v) #define atomic64_sub_and_test atomic64_sub_and_test #endif +#define arch_atomic64_dec_and_test atomic64_dec_and_test + #ifndef atomic64_dec_and_test /** * atomic64_dec_and_test - decrement and test @@ -2144,6 +2362,8 @@ atomic64_dec_and_test(atomic64_t *v) #define atomic64_dec_and_test atomic64_dec_and_test #endif +#define arch_atomic64_inc_and_test atomic64_inc_and_test + #ifndef atomic64_inc_and_test /** * atomic64_inc_and_test - increment and test @@ -2161,6 +2381,8 @@ atomic64_inc_and_test(atomic64_t *v) #define atomic64_inc_and_test atomic64_inc_and_test #endif +#define arch_atomic64_add_negative atomic64_add_negative + #ifndef atomic64_add_negative /** * atomic64_add_negative - add and test if negative @@ -2179,6 +2401,8 @@ atomic64_add_negative(s64 i, atomic64_t *v) #define atomic64_add_negative atomic64_add_negative #endif +#define arch_atomic64_fetch_add_unless atomic64_fetch_add_unless + #ifndef atomic64_fetch_add_unless /** * atomic64_fetch_add_unless - add unless the number is already a given value @@ -2204,6 +2428,8 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) #define atomic64_fetch_add_unless atomic64_fetch_add_unless #endif +#define arch_atomic64_add_unless atomic64_add_unless + #ifndef atomic64_add_unless /** * atomic64_add_unless - add unless the number is already a given value @@ -2222,6 +2448,8 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u) #define atomic64_add_unless atomic64_add_unless #endif +#define arch_atomic64_inc_not_zero atomic64_inc_not_zero + #ifndef atomic64_inc_not_zero /** * atomic64_inc_not_zero - increment unless the number is zero @@ -2238,6 +2466,8 @@ atomic64_inc_not_zero(atomic64_t *v) #define atomic64_inc_not_zero atomic64_inc_not_zero #endif +#define arch_atomic64_inc_unless_negative atomic64_inc_unless_negative + #ifndef atomic64_inc_unless_negative static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) @@ -2254,6 +2484,8 @@ atomic64_inc_unless_negative(atomic64_t *v) #define atomic64_inc_unless_negative atomic64_inc_unless_negative #endif +#define arch_atomic64_dec_unless_positive atomic64_dec_unless_positive + #ifndef atomic64_dec_unless_positive static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) @@ -2270,6 +2502,8 @@ atomic64_dec_unless_positive(atomic64_t *v) #define atomic64_dec_unless_positive atomic64_dec_unless_positive #endif +#define arch_atomic64_dec_if_positive atomic64_dec_if_positive + #ifndef atomic64_dec_if_positive static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) @@ -2288,4 +2522,4 @@ atomic64_dec_if_positive(atomic64_t *v) #endif #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// 1fac0941c79bf0ae100723cc2ac9b94061f0b67a +// 9d95b56f98d82a2a26c7b79ccdd0c47572d50a6f diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index a954def26c0d..900b9f4e0605 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -34,7 +34,7 @@ struct can_skb_priv { int ifindex; int skbcnt; - struct can_frame cf[0]; + struct can_frame cf[]; }; static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb) diff --git a/include/linux/cb710.h b/include/linux/cb710.h index 60de3fedd3a7..405657a9a0d5 100644 --- a/include/linux/cb710.h +++ b/include/linux/cb710.h @@ -36,7 +36,7 @@ struct cb710_chip { unsigned 
slot_mask; unsigned slots; spinlock_t irq_lock; - struct cb710_slot slot[0]; + struct cb710_slot slot[]; }; /* NOTE: cb710_chip.slots is modified only during device init/exit and diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 2247e71beb83..e5ed1c541e7f 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -52,8 +52,7 @@ struct ceph_options { unsigned long osd_idle_ttl; /* jiffies */ unsigned long osd_keepalive_timeout; /* jiffies */ unsigned long osd_request_timeout; /* jiffies */ - - u32 osd_req_flags; /* CEPH_OSD_FLAG_*, applied to each OSD request */ + u32 read_from_replica; /* CEPH_OSD_FLAG_BALANCE/LOCALIZE_READS */ /* * any type that can't be simply compared or doesn't need @@ -76,6 +75,7 @@ struct ceph_options { #define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) #define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) #define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */ +#define CEPH_READ_FROM_REPLICA_DEFAULT 0 /* read from primary */ #define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000) #define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000) diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index ee37256ec8bd..5e55302e3bf6 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -33,6 +33,14 @@ #define __no_sanitize_thread #endif +#if __has_feature(undefined_behavior_sanitizer) +/* GCC does not have __SANITIZE_UNDEFINED__ */ +#define __no_sanitize_undefined \ + __attribute__((no_sanitize("undefined"))) +#else +#define __no_sanitize_undefined +#endif + /* * Not all versions of clang implement the the type-generic versions * of the builtin overflow checkers. Fortunately, clang implements diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 7dd4e0349ef3..1c74464c80c6 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -150,6 +150,12 @@ #define __no_sanitize_thread #endif +#if __has_attribute(__no_sanitize_undefined__) +#define __no_sanitize_undefined __attribute__((no_sanitize_undefined)) +#else +#define __no_sanitize_undefined +#endif + #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 30827f82ad62..204e76856435 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -123,7 +123,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #ifdef CONFIG_DEBUG_ENTRY /* Begin/end of an instrumentation safe region */ #define instrumentation_begin() ({ \ - asm volatile("%c0:\n\t" \ + asm volatile("%c0: nop\n\t" \ ".pushsection .discard.instr_begin\n\t" \ ".long %c0b - .\n\t" \ ".popsection\n\t" : : "i" (__COUNTER__)); \ diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index cdf016596659..c8f03d2969df 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -40,6 +40,7 @@ # define __GCC4_has_attribute___noclone__ 1 # define __GCC4_has_attribute___nonstring__ 0 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) +# define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9) # define __GCC4_has_attribute___fallthrough__ 0 #endif diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 21aed0981edf..c3bf7710f69a 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -5,20 +5,20 @@ #ifndef __ASSEMBLY__ #ifdef 
__CHECKER__ -# define __user __attribute__((noderef, address_space(1))) # define __kernel __attribute__((address_space(0))) +# define __user __attribute__((noderef, address_space(__user))) # define __safe __attribute__((safe)) # define __force __attribute__((force)) # define __nocast __attribute__((nocast)) -# define __iomem __attribute__((noderef, address_space(2))) +# define __iomem __attribute__((noderef, address_space(__iomem))) # define __must_hold(x) __attribute__((context(x,1,1))) # define __acquires(x) __attribute__((context(x,0,1))) # define __releases(x) __attribute__((context(x,1,0))) # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) -# define __percpu __attribute__((noderef, address_space(3))) -# define __rcu __attribute__((noderef, address_space(4))) +# define __percpu __attribute__((noderef, address_space(__percpu))) +# define __rcu __attribute__((noderef, address_space(__rcu))) # define __private __attribute__((noderef)) extern void __chk_user_ptr(const volatile void __user *); extern void __chk_io_ptr(const volatile void __iomem *); @@ -118,10 +118,6 @@ struct ftrace_likely_data { #define notrace __attribute__((__no_instrument_function__)) #endif -/* Section for code which can't be instrumented at all */ -#define noinstr \ - noinline notrace __attribute((__section__(".noinstr.text"))) - /* * it doesn't make sense on ARM (currently the only user of __naked) * to trace naked functions because then mcount is called without @@ -193,16 +189,18 @@ struct ftrace_likely_data { #define __no_kcsan __no_sanitize_thread #ifdef __SANITIZE_THREAD__ -# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused -# define __no_sanitize_or_inline __no_kcsan_or_inline -#else -# define __no_kcsan_or_inline __always_inline +# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused #endif #ifndef __no_sanitize_or_inline #define __no_sanitize_or_inline __always_inline #endif +/* Section for code which can't be instrumented at all */ +#define noinstr \ + noinline notrace __attribute((__section__(".noinstr.text"))) \ + __no_kcsan __no_sanitize_address + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 136f984df0d9..cdfa400f89b3 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -77,8 +77,6 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); -struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, - gfp_t gfp, unsigned long attrs); int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index e1c03339918f..6283917edd90 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -153,7 +153,7 @@ struct dma_interleaved_template { bool dst_sgl; size_t numf; size_t frame_size; - struct data_chunk sgl[0]; + struct data_chunk sgl[]; }; /** @@ -535,7 +535,7 @@ struct dmaengine_unmap_data { struct device *dev; struct kref kref; size_t len; - dma_addr_t addr[0]; + dma_addr_t addr[]; }; struct dma_async_tx_descriptor; diff --git a/include/linux/efi.h b/include/linux/efi.h index 2c6495f72f79..bb35f3305e55 100644 --- a/include/linux/efi.h +++ 
b/include/linux/efi.h @@ -350,6 +350,7 @@ void efi_native_runtime_setup(void); * associated with ConOut */ #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) +#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989, 0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2) #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) #define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) #define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) @@ -1236,14 +1237,11 @@ struct linux_efi_memreserve { struct { phys_addr_t base; phys_addr_t size; - } entry[0]; + } entry[]; }; -#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \ - (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0])) - #define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \ - / sizeof(((struct linux_efi_memreserve *)0)->entry[0])) + / sizeof_field(struct linux_efi_memreserve, entry[0])) void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size); diff --git a/include/linux/fs.h b/include/linux/fs.h index 6c4ab4dc1cd7..3f881a892ea7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2592,7 +2592,6 @@ extern void bdput(struct block_device *); extern void invalidate_bdev(struct block_device *); extern void iterate_bdevs(void (*)(struct block_device *, void *), void *); extern int sync_blockdev(struct block_device *bdev); -extern void kill_bdev(struct block_device *); extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_all(void); extern void emergency_thaw_bdev(struct super_block *sb); @@ -2608,7 +2607,6 @@ static inline bool sb_is_blkdev_sb(struct super_block *sb) #else static inline void bd_forget(struct inode *inode) {} static inline int sync_blockdev(struct block_device *bdev) { return 0; } -static inline void kill_bdev(struct block_device *bdev) {} static inline void invalidate_bdev(struct block_device *bdev) {} static inline struct super_block *freeze_bdev(struct block_device *sb) diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index ce0b5fbf239d..3f0b19dcfae7 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -46,7 +46,7 @@ struct fscache_cache_tag { unsigned long flags; #define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */ atomic_t usage; - char name[0]; /* tag name */ + char name[]; /* tag name */ }; /* diff --git a/include/linux/host1x.h b/include/linux/host1x.h index c230b4e70d75..a3a568bf9686 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -48,6 +48,9 @@ struct host1x_client_ops { * @channel: host1x channel associated with this client * @syncpts: array of syncpoints requested for this client * @num_syncpts: number of syncpoints requested for this client + * @parent: pointer to parent structure + * @usecount: reference count for this structure + * @lock: mutex for mutually exclusive concurrency */ struct host1x_client { struct list_head list; diff --git a/include/linux/i2c.h b/include/linux/i2c.h index c10617bb980a..b8b8963f8bb9 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -408,7 +408,7 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; } * that are present. 
This information is used to grow the driver model tree. * For mainboards this is done statically using i2c_register_board_info(); * bus numbers identify adapters that aren't yet available. For add-on boards, - * i2c_new_device() does this dynamically with the adapter already known. + * i2c_new_client_device() does this dynamically with the adapter already known. */ struct i2c_board_info { char type[I2C_NAME_SIZE]; @@ -439,13 +439,11 @@ struct i2c_board_info { #if IS_ENABLED(CONFIG_I2C) -/* Add-on boards should register/unregister their devices; e.g. a board +/* + * Add-on boards should register/unregister their devices; e.g. a board * with integrated I2C, a config eeprom, sensors, and a codec that's * used in conjunction with the primary hardware. */ -struct i2c_client * -i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); - struct i2c_client * i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info); diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 4100bd224f5c..3e8fa1c7a1e6 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -41,6 +41,7 @@ #define DMA_PTE_SNP BIT_ULL(11) #define DMA_FL_PTE_PRESENT BIT_ULL(0) +#define DMA_FL_PTE_US BIT_ULL(2) #define DMA_FL_PTE_XD BIT_ULL(63) #define ADDR_WIDTH_5LEVEL (57) diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 2735da5f839e..30823780c192 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -2,7 +2,7 @@ #ifndef _LINUX_IRQ_WORK_H #define _LINUX_IRQ_WORK_H -#include +#include /* * An entry can be in one of four states: @@ -13,24 +13,14 @@ * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed */ -/* flags share CSD_FLAG_ space */ - -#define IRQ_WORK_PENDING BIT(0) -#define IRQ_WORK_BUSY BIT(1) - -/* Doesn't want IPI, wait for tick: */ -#define IRQ_WORK_LAZY BIT(2) -/* Run hard IRQ context, even on RT */ -#define IRQ_WORK_HARD_IRQ BIT(3) - -#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY) - -/* - * structure shares layout with single_call_data_t. - */ struct irq_work { - struct llist_node llnode; - atomic_t flags; + union { + struct __call_single_node node; + struct { + struct llist_node llnode; + atomic_t flags; + }; + }; void (*func)(struct irq_work *); }; diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index f613d8529863..d56128df2aff 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -765,6 +765,11 @@ struct journal_s */ int j_errno; + /** + * @j_abort_mutex: Lock the whole aborting procedure. + */ + struct mutex j_abort_mutex; + /** * @j_sb_buffer: The first part of the superblock buffer. 
*/ @@ -1247,7 +1252,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file * data write error in ordered * mode */ -#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */ /* * Function declarations for the journaling transaction and buffer diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 1776eb2e43a4..ea67910ae6b7 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -208,7 +208,7 @@ struct crash_mem_range { struct crash_mem { unsigned int max_nr_ranges; unsigned int nr_ranges; - struct crash_mem_range ranges[0]; + struct crash_mem_range ranges[]; }; extern int crash_exclude_mem_range(struct crash_mem *mem, diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index c62d76478adc..529116b0cabe 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -276,8 +276,7 @@ struct kgdb_arch { * the I/O driver. * @post_exception: Pointer to a function that will do any cleanup work * for the I/O driver. - * @is_console: 1 if the end device is a console 0 if the I/O device is - * not a console + * @cons: valid if the I/O device is a console; else NULL. */ struct kgdb_io { const char *name; @@ -288,7 +287,7 @@ struct kgdb_io { void (*deinit) (void); void (*pre_exception) (void); void (*post_exception) (void); - int is_console; + struct console *cons; }; extern const struct kgdb_arch arch_kgdb_ops; diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 594265bfd390..6adf90f248d7 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -161,7 +161,7 @@ struct kretprobe_instance { kprobe_opcode_t *ret_addr; struct task_struct *task; void *fp; - char data[0]; + char data[]; }; struct kretprobe_blackpoint { @@ -350,6 +350,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) return this_cpu_ptr(&kprobe_ctlblk); } +extern struct kprobe kprobe_busy; +void kprobe_busy_begin(void); +void kprobe_busy_end(void); + kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset); int register_kprobe(struct kprobe *p); void unregister_kprobe(struct kprobe *p); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 62ec926c78a0..d564855243d8 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -409,7 +409,7 @@ struct kvm_irq_routing_table { * Array indexed by gsi. Each entry contains list of irq chips * the gsi is connected to. */ - struct hlist_head map[0]; + struct hlist_head map[]; }; #endif diff --git a/include/linux/libata.h b/include/linux/libata.h index af832852e620..77ccf040a128 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -22,6 +22,7 @@ #include #include #include +#include /* * Define if arch has non-standard setup. 
This is a _PCI_ standard @@ -609,7 +610,7 @@ struct ata_host { struct task_struct *eh_owner; struct ata_port *simplex_claimed; /* channel owning the DMA */ - struct ata_port *ports[0]; + struct ata_port *ports[]; }; struct ata_queued_cmd { @@ -872,6 +873,8 @@ struct ata_port { struct timer_list fastdrain_timer; unsigned long fastdrain_cnt; + async_cookie_t cookie; + int em_message_type; void *private_data; @@ -1092,7 +1095,11 @@ extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd, #define ATA_SCSI_COMPAT_IOCTL /* empty */ #endif extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); +#if IS_REACHABLE(CONFIG_ATA) bool ata_scsi_dma_need_drain(struct request *rq); +#else +#define ata_scsi_dma_need_drain NULL +#endif extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, unsigned int cmd, void __user *arg); extern bool ata_link_online(struct ata_link *link); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 116bd9bb347f..ca1887dd0423 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -4283,7 +4283,8 @@ struct mlx5_ifc_rst2init_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_rst2init_qp_in_bits { @@ -4300,7 +4301,7 @@ struct mlx5_ifc_rst2init_qp_in_bits { u8 opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -6619,7 +6620,8 @@ struct mlx5_ifc_init2init_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_init2init_qp_in_bits { @@ -6636,7 +6638,7 @@ struct mlx5_ifc_init2init_qp_in_bits { u8 opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c4c37fd12104..f6f884970511 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -257,8 +257,8 @@ struct lruvec { */ unsigned long anon_cost; unsigned long file_cost; - /* Evictions & activations on the inactive file list */ - atomic_long_t inactive_age; + /* Non-resident age, driven by LRU movement */ + atomic_long_t nonresident_age; /* Refaults at the time of last reclaim cycle */ unsigned long refaults; /* Various lruvec state flags (enum lruvec_flags) */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6fc613ed8eae..39e28e11863c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3157,7 +3157,7 @@ static inline int dev_recursion_level(void) return this_cpu_read(softnet_data.xmit.recursion); } -#define XMIT_RECURSION_LIMIT 10 +#define XMIT_RECURSION_LIMIT 8 static inline bool dev_xmit_recursion(void) { return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index b394bd4f68a3..c4676d6feeff 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -25,6 +25,12 @@ int ipt_register_table(struct net *net, const struct xt_table *table, const struct ipt_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res); + +void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); + +void ipt_unregister_table_exit(struct net *net, struct xt_table *table); + void ipt_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops); diff --git 
a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 8225f7821a29..1547d5f9ae06 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -29,6 +29,9 @@ int ip6t_register_table(struct net *net, const struct xt_table *table, const struct nf_hook_ops *ops, struct xt_table **res); void ip6t_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops); +void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); +void ip6t_unregister_table_exit(struct net *net, struct xt_table *table); extern unsigned int ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 659045046468..93fcef105061 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -304,16 +304,33 @@ static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) * struct_size() - Calculate size of structure with trailing array. * @p: Pointer to the structure. * @member: Name of the array member. - * @n: Number of elements in the array. + * @count: Number of elements in the array. * * Calculates size of memory needed for structure @p followed by an - * array of @n @member elements. + * array of @count number of @member elements. * * Return: number of bytes needed or SIZE_MAX on overflow. */ -#define struct_size(p, member, n) \ - __ab_c_size(n, \ +#define struct_size(p, member, count) \ + __ab_c_size(count, \ sizeof(*(p)->member) + __must_be_array((p)->member),\ sizeof(*(p))) +/** + * flex_array_size() - Calculate size of a flexible array member + * within an enclosing structure. + * + * @p: Pointer to the structure. + * @member: Name of the flexible array member. + * @count: Number of elements in the array. + * + * Calculates size of a flexible array of @count number of @member + * elements, at the end of structure @p. + * + * Return: number of bytes needed or SIZE_MAX on overflow. 
+ */ +#define flex_array_size(p, member, count) \ + array_size(count, \ + sizeof(*(p)->member) + __must_be_array((p)->member)) + #endif /* __LINUX_OVERFLOW_H */ diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 32b6c52d41b9..56c1e8eb7bb0 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -249,6 +249,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, } #endif +#ifndef __HAVE_ARCH_PTEP_GET +static inline pte_t ptep_get(pte_t *ptep) +{ + return READ_ONCE(*ptep); +} +#endif + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, diff --git a/include/linux/phy.h b/include/linux/phy.h index 8c05d0fb5c00..b693b609b2f5 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1416,6 +1416,7 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev, int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd); +int phy_disable_interrupts(struct phy_device *phydev); void phy_request_interrupt(struct phy_device *phydev); void phy_free_interrupt(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 7fbc8679145c..49d155cd2dfe 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -597,7 +597,7 @@ int sev_guest_df_flush(int *error); */ int sev_guest_decommission(struct sev_data_decommission *data, int *error); -void *psp_copy_user_blob(u64 __user uaddr, u32 len); +void *psp_copy_user_blob(u64 uaddr, u32 len); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 733fad7dfbed..6d15040c642c 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -207,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) { + u16 elem_per_page = p_chain->elem_per_page; + u32 prod = p_chain->u.chain16.prod_idx; + u32 cons = p_chain->u.chain16.cons_idx; u16 used; - used = (u16) (((u32)0x10000 + - (u32)p_chain->u.chain16.prod_idx) - - (u32)p_chain->u.chain16.cons_idx); + if (prod < cons) + prod += (u32)U16_MAX + 1; + + used = (u16)(prod - cons); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page - - p_chain->u.chain16.cons_idx / p_chain->elem_per_page; + used -= prod / elem_per_page - cons / elem_per_page; return (u16)(p_chain->capacity - used); } static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) { + u16 elem_per_page = p_chain->elem_per_page; + u64 prod = p_chain->u.chain32.prod_idx; + u64 cons = p_chain->u.chain32.cons_idx; u32 used; - used = (u32) (((u64)0x100000000ULL + - (u64)p_chain->u.chain32.prod_idx) - - (u64)p_chain->u.chain32.cons_idx); + if (prod < cons) + prod += (u64)U32_MAX + 1; + + used = (u32)(prod - cons); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page - - p_chain->u.chain32.cons_idx / p_chain->elem_per_page; + used -= (u32)(prod / elem_per_page - cons / elem_per_page); return p_chain->capacity - used; } diff --git a/include/linux/sched.h b/include/linux/sched.h index b62e6aaf28f0..692e327d7455 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -654,9 
+654,8 @@ struct task_struct { unsigned int ptrace; #ifdef CONFIG_SMP - struct llist_node wake_entry; - unsigned int wake_entry_type; int on_cpu; + struct __call_single_node wake_entry; #ifdef CONFIG_THREAD_INFO_IN_TASK /* Current CPU: */ unsigned int cpu; diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 8ccd82105de8..76731230bbc5 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -221,7 +221,7 @@ struct sctp_datahdr { __be16 stream; __be16 ssn; __u32 ppid; - __u8 payload[0]; + __u8 payload[]; }; struct sctp_data_chunk { @@ -269,7 +269,7 @@ struct sctp_inithdr { __be16 num_outbound_streams; __be16 num_inbound_streams; __be32 initial_tsn; - __u8 params[0]; + __u8 params[]; }; struct sctp_init_chunk { @@ -299,13 +299,13 @@ struct sctp_cookie_preserve_param { /* Section 3.3.2.1 Host Name Address (11) */ struct sctp_hostname_param { struct sctp_paramhdr param_hdr; - uint8_t hostname[0]; + uint8_t hostname[]; }; /* Section 3.3.2.1 Supported Address Types (12) */ struct sctp_supported_addrs_param { struct sctp_paramhdr param_hdr; - __be16 types[0]; + __be16 types[]; }; /* ADDIP Section 3.2.6 Adaptation Layer Indication */ @@ -317,25 +317,25 @@ struct sctp_adaptation_ind_param { /* ADDIP Section 4.2.7 Supported Extensions Parameter */ struct sctp_supported_ext_param { struct sctp_paramhdr param_hdr; - __u8 chunks[0]; + __u8 chunks[]; }; /* AUTH Section 3.1 Random */ struct sctp_random_param { struct sctp_paramhdr param_hdr; - __u8 random_val[0]; + __u8 random_val[]; }; /* AUTH Section 3.2 Chunk List */ struct sctp_chunks_param { struct sctp_paramhdr param_hdr; - __u8 chunks[0]; + __u8 chunks[]; }; /* AUTH Section 3.3 HMAC Algorithm */ struct sctp_hmac_algo_param { struct sctp_paramhdr param_hdr; - __be16 hmac_ids[0]; + __be16 hmac_ids[]; }; /* RFC 2960. 
Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): @@ -350,7 +350,7 @@ struct sctp_initack_chunk { /* Section 3.3.3.1 State Cookie (7) */ struct sctp_cookie_param { struct sctp_paramhdr p; - __u8 body[0]; + __u8 body[]; }; /* Section 3.3.3.1 Unrecognized Parameters (8) */ @@ -384,7 +384,7 @@ struct sctp_sackhdr { __be32 a_rwnd; __be16 num_gap_ack_blocks; __be16 num_dup_tsns; - union sctp_sack_variable variable[0]; + union sctp_sack_variable variable[]; }; struct sctp_sack_chunk { @@ -436,7 +436,7 @@ struct sctp_shutdown_chunk { struct sctp_errhdr { __be16 cause; __be16 length; - __u8 variable[0]; + __u8 variable[]; }; struct sctp_operr_chunk { @@ -594,7 +594,7 @@ struct sctp_fwdtsn_skip { struct sctp_fwdtsn_hdr { __be32 new_cum_tsn; - struct sctp_fwdtsn_skip skip[0]; + struct sctp_fwdtsn_skip skip[]; }; struct sctp_fwdtsn_chunk { @@ -611,7 +611,7 @@ struct sctp_ifwdtsn_skip { struct sctp_ifwdtsn_hdr { __be32 new_cum_tsn; - struct sctp_ifwdtsn_skip skip[0]; + struct sctp_ifwdtsn_skip skip[]; }; struct sctp_ifwdtsn_chunk { @@ -658,7 +658,7 @@ struct sctp_addip_param { struct sctp_addiphdr { __be32 serial; - __u8 params[0]; + __u8 params[]; }; struct sctp_addip_chunk { @@ -718,7 +718,7 @@ struct sctp_addip_chunk { struct sctp_authhdr { __be16 shkey_id; __be16 hmac_id; - __u8 hmac[0]; + __u8 hmac[]; }; struct sctp_auth_chunk { @@ -733,7 +733,7 @@ struct sctp_infox { struct sctp_reconf_chunk { struct sctp_chunkhdr chunk_hdr; - __u8 params[0]; + __u8 params[]; }; struct sctp_strreset_outreq { @@ -741,13 +741,13 @@ struct sctp_strreset_outreq { __be32 request_seq; __be32 response_seq; __be32 send_reset_at_tsn; - __be16 list_of_streams[0]; + __be16 list_of_streams[]; }; struct sctp_strreset_inreq { struct sctp_paramhdr param_hdr; __be32 request_seq; - __be16 list_of_streams[0]; + __be16 list_of_streams[]; }; struct sctp_strreset_tsnreq { diff --git a/include/linux/smp.h b/include/linux/smp.h index 7ee202ad21a6..80d557ef8a11 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -12,29 +12,22 @@ #include #include #include -#include +#include typedef void (*smp_call_func_t)(void *info); typedef bool (*smp_cond_func_t)(int cpu, void *info); -enum { - CSD_FLAG_LOCK = 0x01, - - /* IRQ_WORK_flags */ - - CSD_TYPE_ASYNC = 0x00, - CSD_TYPE_SYNC = 0x10, - CSD_TYPE_IRQ_WORK = 0x20, - CSD_TYPE_TTWU = 0x30, - CSD_FLAG_TYPE_MASK = 0xF0, -}; - /* * structure shares (partial) layout with struct irq_work */ struct __call_single_data { - struct llist_node llist; - unsigned int flags; + union { + struct __call_single_node node; + struct { + struct llist_node llist; + unsigned int flags; + }; + }; smp_call_func_t func; void *info; }; diff --git a/include/linux/smp_types.h b/include/linux/smp_types.h new file mode 100644 index 000000000000..364b3ae3e41d --- /dev/null +++ b/include/linux/smp_types.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SMP_TYPES_H +#define __LINUX_SMP_TYPES_H + +#include + +enum { + CSD_FLAG_LOCK = 0x01, + + IRQ_WORK_PENDING = 0x01, + IRQ_WORK_BUSY = 0x02, + IRQ_WORK_LAZY = 0x04, /* No IPI, wait for tick */ + IRQ_WORK_HARD_IRQ = 0x08, /* IRQ context on PREEMPT_RT */ + + IRQ_WORK_CLAIMED = (IRQ_WORK_PENDING | IRQ_WORK_BUSY), + + CSD_TYPE_ASYNC = 0x00, + CSD_TYPE_SYNC = 0x10, + CSD_TYPE_IRQ_WORK = 0x20, + CSD_TYPE_TTWU = 0x30, + + CSD_FLAG_TYPE_MASK = 0xF0, +}; + +/* + * struct __call_single_node is the primary type on + * smp.c:call_single_queue. 
+ * + * flush_smp_call_function_queue() only reads the type from + * __call_single_node::u_flags as a regular load, the above + * (anonymous) enum defines all the bits of this word. + * + * Other bits are not modified until the type is known. + * + * CSD_TYPE_SYNC/ASYNC: + * struct { + * struct llist_node node; + * unsigned int flags; + * smp_call_func_t func; + * void *info; + * }; + * + * CSD_TYPE_IRQ_WORK: + * struct { + * struct llist_node node; + * atomic_t flags; + * void (*func)(struct irq_work *); + * }; + * + * CSD_TYPE_TTWU: + * struct { + * struct llist_node node; + * unsigned int flags; + * }; + * + */ + +struct __call_single_node { + struct llist_node llist; + union { + unsigned int u_flags; + atomic_t a_flags; + }; +}; + +#endif /* __LINUX_SMP_TYPES_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 4c5974bb9ba9..5b3216ba39a9 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -313,6 +313,7 @@ struct vma_swap_readahead { }; /* linux/mm/workingset.c */ +void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages); void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg); void workingset_refault(struct page *page, void *shadow); void workingset_activation(struct page *page); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 7c354c2955f5..b951a87da987 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1360,7 +1360,7 @@ static inline long ksys_lchown(const char __user *filename, uid_t user, extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small); -static inline long ksys_ftruncate(unsigned int fd, unsigned long length) +static inline long ksys_ftruncate(unsigned int fd, loff_t length) { return do_sys_ftruncate(fd, length, 1); } diff --git a/include/linux/tifm.h b/include/linux/tifm.h index 299cbb8c63bb..44073d06710f 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -124,7 +124,7 @@ struct tifm_adapter { int (*has_ms_pif)(struct tifm_adapter *fm, struct tifm_dev *sock); - struct tifm_dev *sockets[0]; + struct tifm_dev *sockets[]; }; struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets, diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index b27e2ffa96c1..d5471d6fa778 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -222,9 +222,9 @@ extern bool timekeeping_rtc_skipresume(void); extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta); -/* +/** * struct system_time_snapshot - simultaneous raw/real time capture with - * counter value + * counter value * @cycles: Clocksource counter value to produce the system times * @real: Realtime system time * @raw: Monotonic raw system time @@ -239,9 +239,9 @@ struct system_time_snapshot { u8 cs_was_changed_seq; }; -/* +/** * struct system_device_crosststamp - system/device cross-timestamp - * (syncronized capture) + * (synchronized capture) * @device: Device time * @sys_realtime: Realtime simultaneous with device time * @sys_monoraw: Monotonic raw simultaneous with device time @@ -252,12 +252,12 @@ struct system_device_crosststamp { ktime_t sys_monoraw; }; -/* +/** * struct system_counterval_t - system counter value with the pointer to the - * corresponding clocksource + * corresponding clocksource * @cycles: System counter value * @cs: Clocksource corresponding to system counter value. 
Used by - * timekeeping code to verify comparibility of two cycle values + * timekeeping code to verify comparibility of two cycle values */ struct system_counterval_t { u64 cycles; diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 4f8c90c93c29..64356b199e94 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -81,6 +81,8 @@ struct tcg_efi_specid_event_algs { u16 digest_size; } __packed; +#define TCG_SPECID_SIG "Spec ID Event03" + struct tcg_efi_specid_event_head { u8 signature[16]; u32 platform_class; @@ -171,6 +173,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, int i; int j; u32 count, event_type; + const u8 zero_digest[sizeof(event_header->digest)] = {0}; marker = event; marker_start = marker; @@ -198,10 +201,19 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, count = READ_ONCE(event->count); event_type = READ_ONCE(event->event_type); + /* Verify that it's the log header */ + if (event_header->pcr_idx != 0 || + event_header->event_type != NO_ACTION || + memcmp(event_header->digest, zero_digest, sizeof(zero_digest))) { + size = 0; + goto out; + } + efispecid = (struct tcg_efi_specid_event_head *)event_header->event; /* Check if event is malformed. */ - if (count > efispecid->num_algs) { + if (memcmp(efispecid->signature, TCG_SPECID_SIG, + sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) { size = 0; goto out; } diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 7bcadca22100..0a76ddc07d59 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -301,13 +301,14 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src, return 0; } -bool probe_kernel_read_allowed(const void *unsafe_src, size_t size); +bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size); -extern long probe_kernel_read(void *dst, const void *src, size_t size); -extern long probe_user_read(void *dst, const void __user *src, size_t size); +long copy_from_kernel_nofault(void *dst, const void *src, size_t size); +long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size); -extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); -extern long notrace probe_user_write(void __user *dst, const void *src, size_t size); +long copy_from_user_nofault(void *dst, const void __user *src, size_t size); +long notrace copy_to_user_nofault(void __user *dst, const void *src, + size_t size); long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count); @@ -317,14 +318,16 @@ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, long strnlen_user_nofault(const void __user *unsafe_addr, long count); /** - * probe_kernel_address(): safely attempt to read from a location - * @addr: address to read from - * @retval: read into this variable + * get_kernel_nofault(): safely attempt to read from a location + * @val: read into this variable + * @ptr: address to read from * * Returns 0 on success, or -EFAULT. 
*/ -#define probe_kernel_address(addr, retval) \ - probe_kernel_read(&retval, addr, sizeof(retval)) +#define get_kernel_nofault(val, ptr) ({ \ + const typeof(val) *__gk_ptr = (ptr); \ + copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\ +}) #ifndef user_access_begin #define user_access_begin(ptr,len) access_ok(ptr, len) diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 48bb681e6c2a..0221f852a7e1 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -106,7 +106,6 @@ extern void *vzalloc(unsigned long size); extern void *vmalloc_user(unsigned long size); extern void *vmalloc_node(unsigned long size, int node); extern void *vzalloc_node(unsigned long size, int node); -extern void *vmalloc_exec(unsigned long size); extern void *vmalloc_32(unsigned long size); extern void *vmalloc_32_user(unsigned long size); extern void *__vmalloc(unsigned long size, gfp_t gfp_mask); diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index f2c8311a0433..6315324b9dc2 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -450,6 +450,7 @@ struct flow_block_indr { struct net_device *dev; enum flow_block_binder_type binder_type; void *data; + void *cb_priv; void (*cleanup)(struct flow_block_cb *block_cb); }; @@ -467,6 +468,13 @@ struct flow_block_cb { struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, void (*release)(void *cb_priv)); +struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + void (*release)(void *cb_priv), + struct flow_block_offload *bo, + struct net_device *dev, void *data, + void *indr_cb_priv, + void (*cleanup)(struct flow_block_cb *block_cb)); void flow_block_cb_free(struct flow_block_cb *block_cb); struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block, @@ -488,6 +496,13 @@ static inline void flow_block_cb_remove(struct flow_block_cb *block_cb, list_move(&block_cb->list, &offload->cb_list); } +static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb, + struct flow_block_offload *offload) +{ + list_del(&block_cb->indr.list); + list_move(&block_cb->list, &offload->cb_list); +} + bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident, struct list_head *driver_block_list); @@ -532,11 +547,13 @@ static inline void flow_block_init(struct flow_block *flow_block) } typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, - enum tc_setup_type type, void *type_data); + enum tc_setup_type type, void *type_data, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)); int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv); void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, - flow_setup_cb_t *setup_cb); + void (*release)(void *cb_priv)); int flow_indr_dev_setup_offload(struct net_device *dev, enum tc_setup_type type, void *data, struct flow_block_offload *bo, diff --git a/include/net/gue.h b/include/net/gue.h index 3a6595bfa641..e42402f180b7 100644 --- a/include/net/gue.h +++ b/include/net/gue.h @@ -21,7 +21,7 @@ * | | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * - * C bit indicates contol message when set, data message when unset. + * C bit indicates control message when set, data message when unset. * For a control message, proto/ctype is interpreted as a type of * control message. For data messages, proto/ctype is the IP protocol * of the next header. 
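[Editor's note on the include/linux/uaccess.h hunk above: the probe_kernel_*()/probe_user_*() helpers are renamed to the *_nofault() family and probe_kernel_address() becomes get_kernel_nofault(), with the same calling convention (0 on success, -EFAULT on failure). The sketch below is illustrative only; the peek_task_flags() wrapper and the fields it reads are invented for the example, while the helper names and signatures are taken from the hunk above and the kdb_ps1() caller updated later in this series.]

/*
 * Minimal sketch of a caller of the renamed nofault helpers.
 * peek_task_flags() is a hypothetical wrapper, not part of the patch.
 */
#include <linux/uaccess.h>
#include <linux/sched.h>

static int peek_task_flags(struct task_struct *tsk, unsigned int *flags_out)
{
	unsigned long tmp;

	/* Whole-word probe, mirroring the kdb_ps1() usage in this series */
	if (copy_from_kernel_nofault(&tmp, tsk, sizeof(tmp)))
		return -EFAULT;

	/* Single-field probe via the new get_kernel_nofault() macro */
	if (get_kernel_nofault(*flags_out, &tsk->flags))
		return -EFAULT;

	return 0;
}

[Both calls return 0 or -EFAULT exactly as the old probe_kernel_read()/probe_kernel_address() did, so conversions like the kdb and kgdb ones later in this diff are mechanical renames.]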
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index d7338bfd7b0f..16e8b2f8d006 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -161,10 +161,51 @@ struct nf_flow_route { struct flow_offload *flow_offload_alloc(struct nf_conn *ct); void flow_offload_free(struct flow_offload *flow); -int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table, - flow_setup_cb_t *cb, void *cb_priv); -void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table, - flow_setup_cb_t *cb, void *cb_priv); +static inline int +nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table, + flow_setup_cb_t *cb, void *cb_priv) +{ + struct flow_block *block = &flow_table->flow_block; + struct flow_block_cb *block_cb; + int err = 0; + + down_write(&flow_table->flow_block_lock); + block_cb = flow_block_cb_lookup(block, cb, cb_priv); + if (block_cb) { + err = -EEXIST; + goto unlock; + } + + block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL); + if (IS_ERR(block_cb)) { + err = PTR_ERR(block_cb); + goto unlock; + } + + list_add_tail(&block_cb->list, &block->cb_list); + +unlock: + up_write(&flow_table->flow_block_lock); + return err; +} + +static inline void +nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table, + flow_setup_cb_t *cb, void *cb_priv) +{ + struct flow_block *block = &flow_table->flow_block; + struct flow_block_cb *block_cb; + + down_write(&flow_table->flow_block_lock); + block_cb = flow_block_cb_lookup(block, cb, cb_priv); + if (block_cb) { + list_del(&block_cb->list); + flow_block_cb_free(block_cb); + } else { + WARN_ON(true); + } + up_write(&flow_table->flow_block_lock); +} int flow_offload_route_init(struct flow_offload *flow, const struct nf_flow_route *route); diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 15b4d9aec7ff..122d9e2d8dfd 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -353,11 +353,13 @@ enum { ipv4_is_anycast_6to4(a)) /* Flags used for the bind address copy functions. */ -#define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by +#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by local sock family */ -#define SCTP_ADDR4_PEERSUPP 0x00000002 /* IPv4 address is supported by +#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by + local sock family */ +#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by peer */ -#define SCTP_ADDR6_PEERSUPP 0x00000004 /* IPv6 address is supported by +#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by peer */ /* Reasons to retransmit. 
*/ diff --git a/include/net/sock.h b/include/net/sock.h index c53cc42b5ab9..3428619faae4 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1848,7 +1848,6 @@ static inline int sk_rx_queue_get(const struct sock *sk) static inline void sk_set_socket(struct sock *sk, struct socket *sock) { - sk_tx_queue_clear(sk); sk->sk_socket = sock; } diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h index 79654bcb9a29..8250d6f0a462 100644 --- a/include/net/tc_act/tc_ct.h +++ b/include/net/tc_act/tc_ct.h @@ -66,7 +66,16 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a) #endif /* CONFIG_NF_CONNTRACK */ #if IS_ENABLED(CONFIG_NET_ACT_CT) -void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie); +static inline void +tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie) +{ + enum ip_conntrack_info ctinfo = cookie & NFCT_INFOMASK; + struct nf_conn *ct; + + ct = (struct nf_conn *)(cookie & NFCT_PTRMASK); + nf_conntrack_get(&ct->ct_general); + nf_ct_set(skb, ct, ctinfo); +} #else static inline void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie) { } diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 094fe682f5d7..c7d213c9f9d8 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1008,6 +1008,7 @@ struct xfrm_offload { #define XFRM_GRO 32 #define XFRM_ESP_NO_TRAILER 64 #define XFRM_DEV_RESUME 128 +#define XFRM_XMIT 256 __u32 status; #define CRYPTO_SUCCESS 1 diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h index b65220685920..8c5e38180fb0 100644 --- a/include/sound/dmaengine_pcm.h +++ b/include/sound/dmaengine_pcm.h @@ -161,4 +161,15 @@ int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream, #define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm" +struct dmaengine_pcm { + struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1]; + const struct snd_dmaengine_pcm_config *config; + struct snd_soc_component component; + unsigned int flags; +}; + +static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p) +{ + return container_of(p, struct dmaengine_pcm, component); +} #endif diff --git a/include/sound/soc.h b/include/sound/soc.h index ef5dd28e10a9..2756f9bcac3e 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -444,6 +444,8 @@ int devm_snd_soc_register_component(struct device *dev, const struct snd_soc_component_driver *component_driver, struct snd_soc_dai_driver *dai_drv, int num_dai); void snd_soc_unregister_component(struct device *dev); +struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev, + const char *driver_name); struct snd_soc_component *snd_soc_lookup_component(struct device *dev, const char *driver_name); @@ -1361,6 +1363,10 @@ void snd_soc_remove_pcm_runtime(struct snd_soc_card *card, struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component, struct snd_soc_dai_driver *dai_drv, bool legacy_dai_naming); +struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev, + struct snd_soc_component *component, + struct snd_soc_dai_driver *dai_drv, + bool legacy_dai_naming); void snd_soc_unregister_dai(struct snd_soc_dai *dai); struct snd_soc_dai *snd_soc_find_dai( diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 1257f26bb887..93b114226af8 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -254,7 +254,6 @@ TRACE_EVENT(block_bio_bounce, * block_bio_complete - completed all work on the block operation 
* @q: queue holding the block operation * @bio: block operation completed - * @error: io error value * * This tracepoint indicates there is no further work to do on this * block IO operation @bio. diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index ba9efdc848f9..059b6e45a028 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -400,7 +400,7 @@ enum rxrpc_tx_point { EM(rxrpc_cong_begin_retransmission, " Retrans") \ EM(rxrpc_cong_cleared_nacks, " Cleared") \ EM(rxrpc_cong_new_low_nack, " NewLowN") \ - EM(rxrpc_cong_no_change, "") \ + EM(rxrpc_cong_no_change, " -") \ EM(rxrpc_cong_progress, " Progres") \ EM(rxrpc_cong_retransmit_again, " ReTxAgn") \ EM(rxrpc_cong_rtt_window_end, " RttWinE") \ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 19684813faae..974a71342aea 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3168,7 +3168,7 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * - * void *bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) + * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) * Description * Copy *size* bytes from *data* into a ring buffer *ringbuf*. * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h index b6aac7ee1f67..4c14e8be7267 100644 --- a/include/uapi/linux/fb.h +++ b/include/uapi/linux/fb.h @@ -205,6 +205,7 @@ struct fb_bitfield { #define FB_ACTIVATE_ALL 64 /* change all VCs on this fb */ #define FB_ACTIVATE_FORCE 128 /* force apply even when no change*/ #define FB_ACTIVATE_INV_MODE 256 /* invalidate videomode */ +#define FB_ACTIVATE_KD_TEXT 512 /* for KDSET vt ioctl */ #define FB_ACCELF_TEXT 1 /* (OBSOLETE) see fb_info.flags and vc_mode */ diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 379a612f8f1d..f44eb0a04afd 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -262,6 +262,7 @@ struct fsxattr { #define FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */ #define FS_EOFBLOCKS_FL 0x00400000 /* Reserved for ext4 */ #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ +#define FS_DAX_FL 0x02000000 /* Inode is DAX */ #define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */ #define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ #define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */ diff --git a/include/uapi/linux/mrp_bridge.h b/include/uapi/linux/mrp_bridge.h index 84f15f48a7cb..bee366540212 100644 --- a/include/uapi/linux/mrp_bridge.h +++ b/include/uapi/linux/mrp_bridge.h @@ -36,7 +36,6 @@ enum br_mrp_port_state_type { enum br_mrp_port_role_type { BR_MRP_PORT_ROLE_PRIMARY, BR_MRP_PORT_ROLE_SECONDARY, - BR_MRP_PORT_ROLE_NONE, }; enum br_mrp_tlv_header_type { diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index de5d90212409..0e09dc5cec19 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h @@ -244,6 +244,7 @@ struct nd_cmd_pkg { #define NVDIMM_FAMILY_HPE2 2 #define NVDIMM_FAMILY_MSFT 3 #define NVDIMM_FAMILY_HYPERV 4 +#define NVDIMM_FAMILY_PAPR 5 #define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\ struct nd_cmd_pkg) diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index cba368e55863..c21edb966c19 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -64,10 +64,12 @@ /* supported values for SO_RDS_TRANSPORT */ #define RDS_TRANS_IB 0 -#define RDS_TRANS_IWARP 1 +#define 
RDS_TRANS_GAP 1 #define RDS_TRANS_TCP 2 #define RDS_TRANS_COUNT 3 #define RDS_TRANS_NONE (~0) +/* don't use RDS_TRANS_IWARP - it is deprecated */ +#define RDS_TRANS_IWARP RDS_TRANS_GAP /* IOCTLS commands for SOL_RDS */ #define SIOCRDSSETTOS (SIOCPROTOPRIVATE) diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h index ee0f2460bff6..d56427c0b3e0 100644 --- a/include/uapi/linux/spi/spidev.h +++ b/include/uapi/linux/spi/spidev.h @@ -48,6 +48,10 @@ #define SPI_TX_QUAD 0x200 #define SPI_RX_DUAL 0x400 #define SPI_RX_QUAD 0x800 +#define SPI_CS_WORD 0x1000 +#define SPI_TX_OCTAL 0x2000 +#define SPI_RX_OCTAL 0x4000 +#define SPI_3WIRE_HIZ 0x8000 /*---------------------------------------------------------------------------*/ diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index eca6692667a3..920470502329 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -1030,7 +1030,7 @@ struct vfio_iommu_type1_info_cap_iova_range { * size in bytes that can be used by user applications when getting the dirty * bitmap. */ -#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION 1 +#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION 2 struct vfio_iommu_type1_info_cap_migration { struct vfio_info_cap_header header; diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h index c1395b5bd432..9463db2dfa9d 100644 --- a/include/uapi/linux/xattr.h +++ b/include/uapi/linux/xattr.h @@ -7,6 +7,7 @@ Copyright (C) 2001 by Andreas Gruenbacher Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. Copyright (c) 2004 Red Hat, Inc., James Morris + Copyright (c) 2020 Jan (janneke) Nieuwenhuizen */ #include @@ -31,6 +32,9 @@ #define XATTR_BTRFS_PREFIX "btrfs." #define XATTR_BTRFS_PREFIX_LEN (sizeof(XATTR_BTRFS_PREFIX) - 1) +#define XATTR_HURD_PREFIX "gnu." +#define XATTR_HURD_PREFIX_LEN (sizeof(XATTR_HURD_PREFIX) - 1) + #define XATTR_SECURITY_PREFIX "security." #define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1) diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 4d76f16524cc..ac53102e244a 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -1276,16 +1276,23 @@ static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp, static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen) { - if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0) + if (unlikely(max_optlen < 0)) return -EINVAL; + if (unlikely(max_optlen > PAGE_SIZE)) { + /* We don't expose optvals that are greater than PAGE_SIZE + * to the BPF program. 
+ */ + max_optlen = PAGE_SIZE; + } + ctx->optval = kzalloc(max_optlen, GFP_USER); if (!ctx->optval) return -ENOMEM; ctx->optval_end = ctx->optval + max_optlen; - return 0; + return max_optlen; } static void sockopt_free_buf(struct bpf_sockopt_kern *ctx) @@ -1319,13 +1326,13 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, */ max_optlen = max_t(int, 16, *optlen); - ret = sockopt_alloc_buf(&ctx, max_optlen); - if (ret) - return ret; + max_optlen = sockopt_alloc_buf(&ctx, max_optlen); + if (max_optlen < 0) + return max_optlen; ctx.optlen = *optlen; - if (copy_from_user(ctx.optval, optval, *optlen) != 0) { + if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) { ret = -EFAULT; goto out; } @@ -1353,8 +1360,14 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, /* export any potential modifications */ *level = ctx.level; *optname = ctx.optname; - *optlen = ctx.optlen; - *kernel_optval = ctx.optval; + + /* optlen == 0 from BPF indicates that we should + * use original userspace data. + */ + if (ctx.optlen != 0) { + *optlen = ctx.optlen; + *kernel_optval = ctx.optval; + } } out: @@ -1385,12 +1398,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT)) return retval; - ret = sockopt_alloc_buf(&ctx, max_optlen); - if (ret) - return ret; - ctx.optlen = max_optlen; + max_optlen = sockopt_alloc_buf(&ctx, max_optlen); + if (max_optlen < 0) + return max_optlen; + if (!retval) { /* If kernel getsockopt finished successfully, * copy whatever was returned to the user back @@ -1404,10 +1417,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, goto out; } - if (ctx.optlen > max_optlen) - ctx.optlen = max_optlen; - - if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) { + if (copy_from_user(ctx.optval, optval, + min(ctx.optlen, max_optlen)) != 0) { ret = -EFAULT; goto out; } @@ -1436,10 +1447,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, goto out; } - if (copy_to_user(optval, ctx.optval, ctx.optlen) || - put_user(ctx.optlen, optlen)) { - ret = -EFAULT; - goto out; + if (ctx.optlen != 0) { + if (copy_to_user(optval, ctx.optval, ctx.optlen) || + put_user(ctx.optlen, optlen)) { + ret = -EFAULT; + goto out; + } } ret = ctx.retval; diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 0cbb72cdaf63..5fdbc776a760 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -86,12 +86,13 @@ static DEFINE_PER_CPU(struct list_head, dev_flush_list); static DEFINE_SPINLOCK(dev_map_lock); static LIST_HEAD(dev_map_list); -static struct hlist_head *dev_map_create_hash(unsigned int entries) +static struct hlist_head *dev_map_create_hash(unsigned int entries, + int numa_node) { int i; struct hlist_head *hash; - hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL); + hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node); if (hash != NULL) for (i = 0; i < entries; i++) INIT_HLIST_HEAD(&hash[i]); @@ -145,7 +146,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) return -EINVAL; if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { - dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets); + dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets, + dtab->map.numa_node); if (!dtab->dev_index_head) goto free_charge; @@ -232,7 +234,7 @@ static void dev_map_free(struct bpf_map *map) } } - kfree(dtab->dev_index_head); + bpf_map_area_free(dtab->dev_index_head); } else { for (i = 0; i < 
dtab->map.max_entries; i++) { struct bpf_dtab_netdev *dev; diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index ccc0f98abdd4..9e5934780f41 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -169,18 +169,18 @@ int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) { int err; - err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, + err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err) return err; - err = probe_kernel_write((char *)bpt->bpt_addr, + err = copy_to_kernel_nofault((char *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); return err; } int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) { - return probe_kernel_write((char *)bpt->bpt_addr, + return copy_to_kernel_nofault((char *)bpt->bpt_addr, (char *)bpt->saved_instr, BREAK_INSTR_SIZE); } @@ -587,6 +587,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, arch_kgdb_ops.disable_hw_break(regs); acquirelock: + rcu_read_lock(); /* * Interrupts will be restored by the 'trap return' code, except when * single stepping. @@ -646,6 +647,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, atomic_dec(&slaves_in_kgdb); dbg_touch_watchdogs(); local_irq_restore(flags); + rcu_read_unlock(); return 0; } cpu_relax(); @@ -664,6 +666,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, raw_spin_unlock(&dbg_master_lock); dbg_touch_watchdogs(); local_irq_restore(flags); + rcu_read_unlock(); goto acquirelock; } @@ -787,6 +790,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, raw_spin_unlock(&dbg_master_lock); dbg_touch_watchdogs(); local_irq_restore(flags); + rcu_read_unlock(); return kgdb_info[cpu].ret_state; } diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index 4b280fc7dd67..61774aec46b4 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -247,7 +247,7 @@ char *kgdb_mem2hex(char *mem, char *buf, int count) */ tmp = buf + count; - err = probe_kernel_read(tmp, mem, count); + err = copy_from_kernel_nofault(tmp, mem, count); if (err) return NULL; while (count > 0) { @@ -283,7 +283,7 @@ int kgdb_hex2mem(char *buf, char *mem, int count) *tmp_raw |= hex_to_bin(*tmp_hex--) << 4; } - return probe_kernel_write(mem, tmp_raw, count); + return copy_to_kernel_nofault(mem, tmp_raw, count); } /* @@ -335,7 +335,7 @@ static int kgdb_ebin2mem(char *buf, char *mem, int count) size++; } - return probe_kernel_write(mem, c, size); + return copy_to_kernel_nofault(mem, c, size); } #if DBG_MAX_REG_NUM > 0 diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index 924bc9298a42..683a799618ad 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -542,6 +542,44 @@ static int kdb_search_string(char *searched, char *searchfor) return 0; } +static void kdb_msg_write(const char *msg, int msg_len) +{ + struct console *c; + + if (msg_len == 0) + return; + + if (dbg_io_ops) { + const char *cp = msg; + int len = msg_len; + + while (len--) { + dbg_io_ops->write_char(*cp); + cp++; + } + } + + for_each_console(c) { + if (!(c->flags & CON_ENABLED)) + continue; + if (c == dbg_io_ops->cons) + continue; + /* + * Set oops_in_progress to encourage the console drivers to + * disregard their internal spin locks: in the current calling + * context the risk of deadlock is a bigger problem than risks + * due to re-entering the console driver. 
We operate directly on + * oops_in_progress rather than using bust_spinlocks() because + * the calls bust_spinlocks() makes on exit are not appropriate + * for this calling context. + */ + ++oops_in_progress; + c->write(c, msg, msg_len); + --oops_in_progress; + touch_nmi_watchdog(); + } +} + int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) { int diag; @@ -553,7 +591,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) int this_cpu, old_cpu; char *cp, *cp2, *cphold = NULL, replaced_byte = ' '; char *moreprompt = "more> "; - struct console *c; unsigned long uninitialized_var(flags); /* Serialize kdb_printf if multiple cpus try to write at once. @@ -687,22 +724,11 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) */ retlen = strlen(kdb_buffer); cp = (char *) printk_skip_headers(kdb_buffer); - if (!dbg_kdb_mode && kgdb_connected) { + if (!dbg_kdb_mode && kgdb_connected) gdbstub_msg_write(cp, retlen - (cp - kdb_buffer)); - } else { - if (dbg_io_ops && !dbg_io_ops->is_console) { - len = retlen - (cp - kdb_buffer); - cp2 = cp; - while (len--) { - dbg_io_ops->write_char(*cp2); - cp2++; - } - } - for_each_console(c) { - c->write(c, cp, retlen - (cp - kdb_buffer)); - touch_nmi_watchdog(); - } - } + else + kdb_msg_write(cp, retlen - (cp - kdb_buffer)); + if (logging) { saved_loglevel = console_loglevel; console_loglevel = CONSOLE_LOGLEVEL_SILENT; @@ -751,19 +777,7 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) moreprompt = "more> "; kdb_input_flush(); - - if (dbg_io_ops && !dbg_io_ops->is_console) { - len = strlen(moreprompt); - cp = moreprompt; - while (len--) { - dbg_io_ops->write_char(*cp); - cp++; - } - } - for_each_console(c) { - c->write(c, moreprompt, strlen(moreprompt)); - touch_nmi_watchdog(); - } + kdb_msg_write(moreprompt, strlen(moreprompt)); if (logging) printk("%s", moreprompt); diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index ec190569f690..5c7949061671 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -2326,7 +2326,8 @@ void kdb_ps1(const struct task_struct *p) int cpu; unsigned long tmp; - if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long))) + if (!p || + copy_from_kernel_nofault(&tmp, (char *)p, sizeof(unsigned long))) return; cpu = kdb_process_cpu(p); diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c index b8e6306e7e13..004c5b6c87f8 100644 --- a/kernel/debug/kdb/kdb_support.c +++ b/kernel/debug/kdb/kdb_support.c @@ -325,7 +325,7 @@ char *kdb_strdup(const char *str, gfp_t type) */ int kdb_getarea_size(void *res, unsigned long addr, size_t size) { - int ret = probe_kernel_read((char *)res, (char *)addr, size); + int ret = copy_from_kernel_nofault((char *)res, (char *)addr, size); if (ret) { if (!KDB_STATE(SUPPRESS)) { kdb_printf("kdb_getarea: Bad address 0x%lx\n", addr); @@ -350,7 +350,7 @@ int kdb_getarea_size(void *res, unsigned long addr, size_t size) */ int kdb_putarea_size(unsigned long addr, void *res, size_t size) { - int ret = probe_kernel_read((char *)addr, (char *)res, size); + int ret = copy_from_kernel_nofault((char *)addr, (char *)res, size); if (ret) { if (!KDB_STATE(SUPPRESS)) { kdb_printf("kdb_putarea: Bad address 0x%lx\n", addr); @@ -624,7 +624,8 @@ char kdb_task_state_char (const struct task_struct *p) char state; unsigned long tmp; - if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long))) + if (!p || + copy_from_kernel_nofault(&tmp, (char *)p, sizeof(unsigned long))) return 'E'; 
cpu = kdb_process_cpu(p); diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index d006668c0027..1da3f44f2565 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -71,20 +71,21 @@ config SWIOTLB # in the pagetables # config DMA_NONCOHERENT_MMAP - bool - -config DMA_REMAP - depends on MMU - select GENERIC_ALLOCATOR - select DMA_NONCOHERENT_MMAP + default y if !MMU bool config DMA_COHERENT_POOL + select GENERIC_ALLOCATOR bool - select DMA_REMAP + +config DMA_REMAP + bool + depends on MMU + select DMA_NONCOHERENT_MMAP config DMA_DIRECT_REMAP bool + select DMA_REMAP select DMA_COHERENT_POOL config DMA_CMA diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 0a4881e59aa7..93f578a8e613 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -109,14 +109,15 @@ static inline bool dma_should_free_from_pool(struct device *dev, return false; } -struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, +static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, gfp_t gfp, unsigned long attrs) { - size_t alloc_size = PAGE_ALIGN(size); int node = dev_to_node(dev); struct page *page = NULL; u64 phys_limit; + WARN_ON_ONCE(!PAGE_ALIGNED(size)); + if (attrs & DMA_ATTR_NO_WARN) gfp |= __GFP_NOWARN; @@ -124,14 +125,14 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, gfp &= ~__GFP_ZERO; gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, &phys_limit); - page = dma_alloc_contiguous(dev, alloc_size, gfp); + page = dma_alloc_contiguous(dev, size, gfp); if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { - dma_free_contiguous(dev, page, alloc_size); + dma_free_contiguous(dev, page, size); page = NULL; } again: if (!page) - page = alloc_pages_node(node, gfp, get_order(alloc_size)); + page = alloc_pages_node(node, gfp, get_order(size)); if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { dma_free_contiguous(dev, page, size); page = NULL; @@ -157,9 +158,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, { struct page *page; void *ret; + int err; + + size = PAGE_ALIGN(size); if (dma_should_alloc_from_pool(dev, gfp, attrs)) { - ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp); + ret = dma_alloc_from_pool(dev, size, &page, gfp); if (!ret) return NULL; goto done; @@ -183,14 +187,20 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, dma_alloc_need_uncached(dev, attrs)) || (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) { /* remove any dirty cache lines on the kernel alias */ - arch_dma_prep_coherent(page, PAGE_ALIGN(size)); + arch_dma_prep_coherent(page, size); /* create a coherent mapping */ - ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size), + ret = dma_common_contiguous_remap(page, size, dma_pgprot(dev, PAGE_KERNEL, attrs), __builtin_return_address(0)); if (!ret) goto out_free_pages; + if (force_dma_unencrypted(dev)) { + err = set_memory_decrypted((unsigned long)ret, + 1 << get_order(size)); + if (err) + goto out_free_pages; + } memset(ret, 0, size); goto done; } @@ -207,8 +217,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, } ret = page_address(page); - if (force_dma_unencrypted(dev)) - set_memory_decrypted((unsigned long)ret, 1 << get_order(size)); + if (force_dma_unencrypted(dev)) { + err = set_memory_decrypted((unsigned long)ret, + 1 << get_order(size)); + if (err) + goto out_free_pages; + } memset(ret, 0, size); @@ -217,7 +231,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, arch_dma_prep_coherent(page, size); 
ret = arch_dma_set_uncached(ret, size); if (IS_ERR(ret)) - goto out_free_pages; + goto out_encrypt_pages; } done: if (force_dma_unencrypted(dev)) @@ -225,6 +239,15 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, else *dma_handle = phys_to_dma(dev, page_to_phys(page)); return ret; + +out_encrypt_pages: + if (force_dma_unencrypted(dev)) { + err = set_memory_encrypted((unsigned long)page_address(page), + 1 << get_order(size)); + /* If memory cannot be re-encrypted, it must be leaked */ + if (err) + return NULL; + } out_free_pages: dma_free_contiguous(dev, page, size); return NULL; @@ -459,7 +482,6 @@ int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt, return ret; } -#ifdef CONFIG_MMU bool dma_direct_can_mmap(struct device *dev) { return dev_is_dma_coherent(dev) || @@ -485,19 +507,6 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, user_count << PAGE_SHIFT, vma->vm_page_prot); } -#else /* CONFIG_MMU */ -bool dma_direct_can_mmap(struct device *dev) -{ - return false; -} - -int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) -{ - return -ENXIO; -} -#endif /* CONFIG_MMU */ int dma_direct_supported(struct device *dev, u64 mask) { diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c index 35bb51c31fff..8cfa01243ed2 100644 --- a/kernel/dma/pool.c +++ b/kernel/dma/pool.c @@ -175,10 +175,9 @@ static int __init dma_atomic_pool_init(void) * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1. */ if (!atomic_pool_size) { - atomic_pool_size = max(totalram_pages() >> PAGE_SHIFT, 1UL) * - SZ_128K; - atomic_pool_size = min_t(size_t, atomic_pool_size, - 1 << (PAGE_SHIFT + MAX_ORDER-1)); + unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K); + pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES); + atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K); } INIT_WORK(&atomic_pool_work, atomic_pool_work_fn); diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c index e739a6eea6e7..78b23f089cf1 100644 --- a/kernel/dma/remap.c +++ b/kernel/dma/remap.c @@ -24,7 +24,8 @@ void *dma_common_pages_remap(struct page **pages, size_t size, { void *vaddr; - vaddr = vmap(pages, size >> PAGE_SHIFT, VM_DMA_COHERENT, prot); + vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, + VM_DMA_COHERENT, prot); if (vaddr) find_vm_area(vaddr)->pages = pages; return vaddr; @@ -37,7 +38,7 @@ void *dma_common_pages_remap(struct page **pages, size_t size, void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot, const void *caller) { - int count = size >> PAGE_SHIFT; + int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct page **pages; void *vaddr; int i; diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index bb05fd52de85..09cc78df53c6 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -181,34 +181,19 @@ void kimage_file_post_load_cleanup(struct kimage *image) static int kimage_validate_signature(struct kimage *image) { - const char *reason; int ret; ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf, image->kernel_buf_len); - switch (ret) { - case 0: - break; + if (ret) { - /* Certain verification errors are non-fatal if we're not - * checking errors, provided we aren't mandating that there - * must be a valid signature. 
- */ - case -ENODATA: - reason = "kexec of unsigned image"; - goto decide; - case -ENOPKG: - reason = "kexec of image with unsupported crypto"; - goto decide; - case -ENOKEY: - reason = "kexec of image with unavailable key"; - decide: if (IS_ENABLED(CONFIG_KEXEC_SIG_FORCE)) { - pr_notice("%s rejected\n", reason); + pr_notice("Enforced kernel signature verification failed (%d).\n", ret); return ret; } - /* If IMA is guaranteed to appraise a signature on the kexec + /* + * If IMA is guaranteed to appraise a signature on the kexec * image, permit it even if the kernel is otherwise locked * down. */ @@ -216,17 +201,10 @@ kimage_validate_signature(struct kimage *image) security_locked_down(LOCKDOWN_KEXEC)) return -EPERM; - return 0; - - /* All other errors are fatal, including nomem, unparseable - * signatures and signature check failures - even if signatures - * aren't required. - */ - default: - pr_notice("kernel signature verification failed (%d).\n", ret); + pr_debug("kernel signature verification failed (%d).\n", ret); } - return ret; + return 0; } #endif diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 50cd84f53df0..4a904cc56d68 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -46,6 +46,11 @@ static int kprobes_initialized; +/* kprobe_table can be accessed by + * - Normal hlist traversal and RCU add/del under kprobe_mutex is held. + * Or + * - RCU hlist traversal under disabling preempt (breakpoint handlers) + */ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; @@ -326,7 +331,8 @@ struct kprobe *get_kprobe(void *addr) struct kprobe *p; head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; - hlist_for_each_entry_rcu(p, head, hlist) { + hlist_for_each_entry_rcu(p, head, hlist, + lockdep_is_held(&kprobe_mutex)) { if (p->addr == addr) return p; } @@ -586,11 +592,12 @@ static void kprobe_optimizer(struct work_struct *work) mutex_unlock(&module_mutex); mutex_unlock(&text_mutex); cpus_read_unlock(); - mutex_unlock(&kprobe_mutex); /* Step 5: Kick optimizer again if needed */ if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) kick_kprobe_optimizer(); + + mutex_unlock(&kprobe_mutex); } /* Wait for completing optimization and unoptimization */ @@ -668,8 +675,6 @@ static void force_unoptimize_kprobe(struct optimized_kprobe *op) lockdep_assert_cpus_held(); arch_unoptimize_kprobe(op); op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; - if (kprobe_disabled(&op->kp)) - arch_disarm_kprobe(&op->kp); } /* Unoptimize a kprobe if p is optimized */ @@ -849,7 +854,7 @@ static void optimize_all_kprobes(void) kprobes_allow_optimization = true; for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; - hlist_for_each_entry_rcu(p, head, hlist) + hlist_for_each_entry(p, head, hlist) if (!kprobe_disabled(p)) optimize_kprobe(p); } @@ -876,7 +881,7 @@ static void unoptimize_all_kprobes(void) kprobes_allow_optimization = false; for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; - hlist_for_each_entry_rcu(p, head, hlist) { + hlist_for_each_entry(p, head, hlist) { if (!kprobe_disabled(p)) unoptimize_kprobe(p, false); } @@ -1236,6 +1241,26 @@ __releases(hlist_lock) } NOKPROBE_SYMBOL(kretprobe_table_unlock); +struct kprobe kprobe_busy = { + .addr = (void *) get_kprobe, +}; + +void kprobe_busy_begin(void) +{ + struct kprobe_ctlblk *kcb; + + preempt_disable(); + __this_cpu_write(current_kprobe, &kprobe_busy); + kcb = get_kprobe_ctlblk(); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; +} + +void 
kprobe_busy_end(void) +{ + __this_cpu_write(current_kprobe, NULL); + preempt_enable(); +} + /* * This function is called from finish_task_switch when task tk becomes dead, * so that we can recycle any function-return probe instances associated @@ -1253,6 +1278,8 @@ void kprobe_flush_task(struct task_struct *tk) /* Early boot. kretprobe_table_locks not yet initialized. */ return; + kprobe_busy_begin(); + INIT_HLIST_HEAD(&empty_rp); hash = hash_ptr(tk, KPROBE_HASH_BITS); head = &kretprobe_inst_table[hash]; @@ -1266,6 +1293,8 @@ void kprobe_flush_task(struct task_struct *tk) hlist_del(&ri->hlist); kfree(ri); } + + kprobe_busy_end(); } NOKPROBE_SYMBOL(kprobe_flush_task); @@ -1499,12 +1528,14 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p) { struct kprobe *ap, *list_p; + lockdep_assert_held(&kprobe_mutex); + ap = get_kprobe(p->addr); if (unlikely(!ap)) return NULL; if (p != ap) { - list_for_each_entry_rcu(list_p, &ap->list, list) + list_for_each_entry(list_p, &ap->list, list) if (list_p == p) /* kprobe p is a valid probe */ goto valid; @@ -1669,7 +1700,9 @@ static int aggr_kprobe_disabled(struct kprobe *ap) { struct kprobe *kp; - list_for_each_entry_rcu(kp, &ap->list, list) + lockdep_assert_held(&kprobe_mutex); + + list_for_each_entry(kp, &ap->list, list) if (!kprobe_disabled(kp)) /* * There is an active probe on the list. @@ -1748,7 +1781,7 @@ static int __unregister_kprobe_top(struct kprobe *p) else { /* If disabling probe has special handlers, update aggrprobe */ if (p->post_handler && !kprobe_gone(p)) { - list_for_each_entry_rcu(list_p, &ap->list, list) { + list_for_each_entry(list_p, &ap->list, list) { if ((list_p != p) && (list_p->post_handler)) goto noclean; } @@ -2062,13 +2095,15 @@ static void kill_kprobe(struct kprobe *p) { struct kprobe *kp; + lockdep_assert_held(&kprobe_mutex); + p->flags |= KPROBE_FLAG_GONE; if (kprobe_aggrprobe(p)) { /* * If this is an aggr_kprobe, we have to list all the * chained probes and mark them GONE. 
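/*
 * Editor's sketch (not part of the patch): kprobe_busy_begin()/end() above
 * mark the CPU as already running a kprobe handler so that any probe hit
 * while kprobe_flush_task() recycles kretprobe instances is treated as
 * recursion and skipped. A rough userspace analogue of that re-entrancy
 * guard using a thread-local marker; every name here is invented.
 */
#include <stdbool.h>
#include <stddef.h>

struct fake_probe { const char *name; };

static _Thread_local struct fake_probe *current_fake_probe;
static struct fake_probe fake_busy = { .name = "busy" };

static void fake_busy_begin(void) { current_fake_probe = &fake_busy; }
static void fake_busy_end(void)   { current_fake_probe = NULL; }

/* a handler refuses to run while another handler (or the busy marker) is set */
static bool fake_handler(struct fake_probe *p)
{
	if (current_fake_probe)
		return false;		/* recursion: skip */
	current_fake_probe = p;
	/* ... handler body ... */
	current_fake_probe = NULL;
	return true;
}

static void fake_flush_task(void)
{
	fake_busy_begin();
	/* ... recycle per-task instances; nested fake_handler() calls bail out ... */
	fake_busy_end();
}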
*/ - list_for_each_entry_rcu(kp, &p->list, list) + list_for_each_entry(kp, &p->list, list) kp->flags |= KPROBE_FLAG_GONE; p->post_handler = NULL; kill_optimized_kprobe(p); @@ -2312,7 +2347,7 @@ static int kprobes_module_callback(struct notifier_block *nb, mutex_lock(&kprobe_mutex); for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; - hlist_for_each_entry_rcu(p, head, hlist) + hlist_for_each_entry(p, head, hlist) if (within_module_init((unsigned long)p->addr, mod) || (checkcore && within_module_core((unsigned long)p->addr, mod))) { @@ -2550,7 +2585,7 @@ static int arm_all_kprobes(void) for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; /* Arm all kprobes on a best-effort basis */ - hlist_for_each_entry_rcu(p, head, hlist) { + hlist_for_each_entry(p, head, hlist) { if (!kprobe_disabled(p)) { err = arm_kprobe(p); if (err) { @@ -2593,7 +2628,7 @@ static int disarm_all_kprobes(void) for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; /* Disarm all kprobes on a best-effort basis */ - hlist_for_each_entry_rcu(p, head, hlist) { + hlist_for_each_entry(p, head, hlist) { if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) { err = disarm_kprobe(p, false); if (err) { diff --git a/kernel/kthread.c b/kernel/kthread.c index 8e3d2d7fdf5e..132f84a5fde3 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -201,7 +201,7 @@ void *kthread_probe_data(struct task_struct *task) struct kthread *kthread = to_kthread(task); void *data = NULL; - probe_kernel_read(&data, &kthread->data, sizeof(data)); + copy_from_kernel_nofault(&data, &kthread->data, sizeof(data)); return data; } diff --git a/kernel/module.c b/kernel/module.c index e8a198588f26..0c6573b98c36 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2783,7 +2783,9 @@ static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug) void * __weak module_alloc(unsigned long size) { - return vmalloc_exec(size); + return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, + GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, + NUMA_NO_NODE, __func__); } bool __weak module_init_section(const char *name) diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index b03df67621d0..cd356630a311 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c @@ -531,7 +531,7 @@ SYSCALL_DEFINE2(setns, int, fd, int, flags) } else if (!IS_ERR(pidfd_pid(file))) { err = check_setns_flags(flags); } else { - err = -EBADF; + err = -EINVAL; } if (err) goto out; diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 8c14835be46c..b71eaf5f5a86 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -974,16 +974,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) user->idx = log_next_idx; user->seq = log_next_seq; break; - case SEEK_CUR: - /* - * It isn't supported due to the record nature of this - * interface: _SET _DATA and _END point to very specific - * record positions, while _CUR would be more useful in case - * of a byte-based log. Because of that, return the default - * errno value for invalid seek operation. - */ - ret = -ESPIPE; - break; default: ret = -EINVAL; } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index c716eadc7617..6c6569e0586c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -250,7 +250,7 @@ static noinstr void rcu_dynticks_eqs_enter(void) * next idle sojourn. */ rcu_dynticks_task_trace_enter(); // Before ->dynticks update! 
- seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); + seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); // RCU is no longer watching. Better be in extended quiescent state! WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & RCU_DYNTICK_CTRL_CTR)); @@ -274,13 +274,13 @@ static noinstr void rcu_dynticks_eqs_exit(void) * and we also must force ordering with the next RCU read-side * critical section. */ - seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); + seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); // RCU is now watching. Better not be in an extended quiescent state! rcu_dynticks_task_trace_exit(); // After ->dynticks update! WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & RCU_DYNTICK_CTRL_CTR)); if (seq & RCU_DYNTICK_CTRL_MASK) { - atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks); + arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks); smp_mb__after_atomic(); /* _exit after clearing mask. */ } } @@ -313,7 +313,7 @@ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void) { struct rcu_data *rdp = this_cpu_ptr(&rcu_data); - return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR); + return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR); } /* @@ -633,6 +633,10 @@ static noinstr void rcu_eqs_enter(bool user) do_nocb_deferred_wakeup(rdp); rcu_prepare_for_idle(); rcu_preempt_deferred_qs(current); + + // instrumentation for the noinstr rcu_dynticks_eqs_enter() + instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); + instrumentation_end(); WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ // RCU is watching here ... @@ -692,6 +696,7 @@ noinstr void rcu_nmi_exit(void) { struct rcu_data *rdp = this_cpu_ptr(&rcu_data); + instrumentation_begin(); /* * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks. * (We are exiting an NMI handler, so RCU better be paying attention @@ -705,7 +710,6 @@ noinstr void rcu_nmi_exit(void) * leave it in non-RCU-idle state. */ if (rdp->dynticks_nmi_nesting != 1) { - instrumentation_begin(); trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, atomic_read(&rdp->dynticks)); WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */ @@ -714,13 +718,15 @@ noinstr void rcu_nmi_exit(void) return; } - instrumentation_begin(); /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks)); WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ if (!in_nmi()) rcu_prepare_for_idle(); + + // instrumentation for the noinstr rcu_dynticks_eqs_enter() + instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); instrumentation_end(); // RCU is watching here ... @@ -838,6 +844,10 @@ static void noinstr rcu_eqs_exit(bool user) rcu_dynticks_eqs_exit(); // ... but is watching here. 
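/*
 * Editor's sketch (not part of the patch): the rcu_dynticks hunks around
 * this point switch to arch_atomic_*() so the noinstr entry/exit paths stay
 * free of instrumentation, and add explicit instrument_atomic_*() calls
 * where instrumentation is allowed. The counter itself advances by a fixed
 * increment on every idle entry/exit, so one bit toggles between "watching"
 * and "not watching", as the WARN_ON_ONCE() checks above rely on. A toy,
 * simplified C11 version of that idea; constants and names are invented.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define DYNTICK_CTR 0x2UL	/* increment per transition; bit 1 = watching */

static _Atomic unsigned long dynticks = DYNTICK_CTR;	/* start out watching */

static void eqs_enter(void)	/* stop watching (enter idle) */
{
	unsigned long seq = atomic_fetch_add(&dynticks, DYNTICK_CTR) + DYNTICK_CTR;
	(void)seq;		/* seq & DYNTICK_CTR is now clear */
}

static void eqs_exit(void)	/* resume watching */
{
	unsigned long seq = atomic_fetch_add(&dynticks, DYNTICK_CTR) + DYNTICK_CTR;
	(void)seq;		/* seq & DYNTICK_CTR is now set */
}

static bool cpu_is_watching(void)
{
	return atomic_load(&dynticks) & DYNTICK_CTR;
}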
instrumentation_begin(); + + // instrumentation for the noinstr rcu_dynticks_eqs_exit() + instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); + rcu_cleanup_after_idle(); trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks)); WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); @@ -983,13 +993,21 @@ noinstr void rcu_nmi_enter(void) if (!in_nmi()) rcu_cleanup_after_idle(); + instrumentation_begin(); + // instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs() + instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks)); + // instrumentation for the noinstr rcu_dynticks_eqs_exit() + instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks)); + incby = 1; } else if (!in_nmi()) { instrumentation_begin(); rcu_irq_enter_check_tick(); instrumentation_end(); + } else { + instrumentation_begin(); } - instrumentation_begin(); + trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks)); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8f360326861e..ca5db40392d4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1637,7 +1637,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, goto out; } - if (cpumask_equal(p->cpus_ptr, new_mask)) + if (cpumask_equal(&p->cpus_mask, new_mask)) goto out; /* @@ -2293,8 +2293,15 @@ void sched_ttwu_pending(void *arg) rq_lock_irqsave(rq, &rf); update_rq_clock(rq); - llist_for_each_entry_safe(p, t, llist, wake_entry) + llist_for_each_entry_safe(p, t, llist, wake_entry.llist) { + if (WARN_ON_ONCE(p->on_cpu)) + smp_cond_load_acquire(&p->on_cpu, !VAL); + + if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) + set_task_cpu(p, cpu_of(rq)); + ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); + } rq_unlock_irqrestore(rq, &rf); } @@ -2322,7 +2329,7 @@ static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); WRITE_ONCE(rq->ttwu_pending, 1); - __smp_call_single_queue(cpu, &p->wake_entry); + __smp_call_single_queue(cpu, &p->wake_entry.llist); } void wake_up_if_idle(int cpu) @@ -2369,7 +2376,7 @@ static inline bool ttwu_queue_cond(int cpu, int wake_flags) * the soon-to-be-idle CPU as the current CPU is likely busy. * nr_running is checked to avoid unnecessary task stacking. 
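/*
 * Editor's sketch (not part of the patch): the sched_ttwu_pending() and
 * __ttwu_queue_wakelist() hunks above queue remote wakeups on a lock-free
 * llist via p->wake_entry.llist and drain the whole list on the target CPU.
 * A minimal C11 analogue of that push/drain-all pattern; these are invented
 * names, not the kernel's llist API.
 */
#include <stdatomic.h>
#include <stddef.h>

struct wake_node {
	struct wake_node *next;
	int task_id;
};

static _Atomic(struct wake_node *) wakelist_head;

/* producer side: any CPU may push, no lock needed */
static void wakelist_push(struct wake_node *n)
{
	struct wake_node *old = atomic_load_explicit(&wakelist_head,
						     memory_order_relaxed);
	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak_explicit(&wakelist_head, &old, n,
							memory_order_release,
							memory_order_relaxed));
}

/* consumer side: the target CPU grabs the whole pending list at once */
static struct wake_node *wakelist_drain_all(void)
{
	return atomic_exchange_explicit(&wakelist_head, NULL,
					memory_order_acquire);
}

static int process_pending(void)
{
	int woken = 0;

	for (struct wake_node *n = wakelist_drain_all(); n; n = n->next)
		woken++;	/* real code would activate the task here */
	return woken;
}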
*/ - if ((wake_flags & WF_ON_RQ) && cpu_rq(cpu)->nr_running <= 1) + if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1) return true; return false; @@ -2378,6 +2385,9 @@ static inline bool ttwu_queue_cond(int cpu, int wake_flags) static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) { if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) { + if (WARN_ON_ONCE(cpu == smp_processor_id())) + return false; + sched_clock_cpu(cpu); /* Sync clocks across CPUs */ __ttwu_queue_wakelist(p, cpu, wake_flags); return true; @@ -2528,7 +2538,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) goto out; success = 1; - cpu = task_cpu(p); trace_sched_waking(p); p->state = TASK_RUNNING; trace_sched_wakeup(p); @@ -2550,7 +2559,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) /* We're going to change ->state: */ success = 1; - cpu = task_cpu(p); /* * Ensure we load p->on_rq _after_ p->state, otherwise it would @@ -2614,8 +2622,21 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) * which potentially sends an IPI instead of spinning on p->on_cpu to * let the waker make forward progress. This is safe because IRQs are * disabled and the IPI will deliver after on_cpu is cleared. + * + * Ensure we load task_cpu(p) after p->on_cpu: + * + * set_task_cpu(p, cpu); + * STORE p->cpu = @cpu + * __schedule() (switch to task 'p') + * LOCK rq->lock + * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu) + * STORE p->on_cpu = 1 LOAD p->cpu + * + * to ensure we observe the correct CPU on which the task is currently + * scheduling. */ - if (READ_ONCE(p->on_cpu) && ttwu_queue_wakelist(p, cpu, wake_flags | WF_ON_RQ)) + if (smp_load_acquire(&p->on_cpu) && + ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU)) goto unlock; /* @@ -2635,6 +2656,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) psi_ttwu_dequeue(p); set_task_cpu(p, cpu); } +#else + cpu = task_cpu(p); #endif /* CONFIG_SMP */ ttwu_queue(p, cpu, wake_flags); @@ -2642,7 +2665,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) raw_spin_unlock_irqrestore(&p->pi_lock, flags); out: if (success) - ttwu_stat(p, cpu, wake_flags); + ttwu_stat(p, task_cpu(p), wake_flags); preempt_enable(); return success; @@ -2763,7 +2786,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) #endif init_numa_balancing(clone_flags, p); #ifdef CONFIG_SMP - p->wake_entry_type = CSD_TYPE_TTWU; + p->wake_entry.u_flags = CSD_TYPE_TTWU; #endif } @@ -4533,7 +4556,8 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) */ if (dl_prio(prio)) { if (!dl_prio(p->normal_prio) || - (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { + (pi_task && dl_prio(pi_task->prio) && + dl_entity_preempt(&pi_task->dl, &p->dl))) { p->dl.dl_boosted = 1; queue_flag |= ENQUEUE_REPLENISH; } else diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 504d2f51b0d6..f63f337c7147 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2692,6 +2692,7 @@ void __dl_clear_params(struct task_struct *p) dl_se->dl_bw = 0; dl_se->dl_density = 0; + dl_se->dl_boosted = 0; dl_se->dl_throttled = 0; dl_se->dl_yielded = 0; dl_se->dl_non_contending = 0; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index cbcb2f71599b..658aa7a2ae6f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -806,7 +806,7 @@ void post_init_entity_util_avg(struct task_struct *p) } 
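/*
 * Editor's sketch (not part of the patch): the try_to_wake_up() comment
 * above pairs the scheduler's store of p->cpu, followed by a later store of
 * p->on_cpu, with a load-acquire of p->on_cpu before reading task_cpu(p).
 * A self-contained C11 illustration of that acquire/release pairing. It is
 * simplified: the kernel's release side is the rq lock plus
 * smp_mb__after_spin_lock(), not a plain release store, and all field and
 * function names below are invented.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct fake_task {
	_Atomic int cpu;
	_Atomic bool on_cpu;
};

/* "scheduler" side: publish the CPU, then mark the task running */
static void fake_schedule_on(struct fake_task *p, int cpu)
{
	atomic_store_explicit(&p->cpu, cpu, memory_order_relaxed);
	atomic_store_explicit(&p->on_cpu, true, memory_order_release);
}

/* "waker" side: if we observed on_cpu == true, we must also see the new cpu */
static int fake_wakeup_target(struct fake_task *p, int fallback_cpu)
{
	if (atomic_load_explicit(&p->on_cpu, memory_order_acquire))
		return atomic_load_explicit(&p->cpu, memory_order_relaxed);
	return fallback_cpu;
}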
} - sa->runnable_avg = cpu_scale; + sa->runnable_avg = sa->util_avg; if (p->sched_class != &fair_sched_class) { /* diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 05deb81bb3e3..1ae95b9150d3 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -96,6 +96,15 @@ void __cpuidle default_idle_call(void) } } +static int call_cpuidle_s2idle(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{ + if (current_clr_polling_and_test()) + return -EBUSY; + + return cpuidle_enter_s2idle(drv, dev); +} + static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev, int next_state) { @@ -171,11 +180,9 @@ static void cpuidle_idle_call(void) if (idle_should_enter_s2idle()) { rcu_idle_enter(); - entered_state = cpuidle_enter_s2idle(drv, dev); - if (entered_state > 0) { - local_irq_enable(); + entered_state = call_cpuidle_s2idle(drv, dev); + if (entered_state > 0) goto exit_idle; - } rcu_idle_exit(); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 1d4e94c1e5fe..877fb08eb1b0 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1682,7 +1682,7 @@ static inline int task_on_rq_migrating(struct task_struct *p) #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ #define WF_FORK 0x02 /* Child wakeup after fork */ #define WF_MIGRATED 0x04 /* Internal use, task got migrated */ -#define WF_ON_RQ 0x08 /* Wakee is on_rq */ +#define WF_ON_CPU 0x08 /* Wakee is on_cpu */ /* * To aid in avoiding the subversion of "niceness" due to uneven distribution diff --git a/kernel/smp.c b/kernel/smp.c index 472c2b274c65..aa17eedff5be 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -669,24 +669,6 @@ void __init smp_init(void) { int num_nodes, num_cpus; - /* - * Ensure struct irq_work layout matches so that - * flush_smp_call_function_queue() can do horrible things. - */ - BUILD_BUG_ON(offsetof(struct irq_work, llnode) != - offsetof(struct __call_single_data, llist)); - BUILD_BUG_ON(offsetof(struct irq_work, func) != - offsetof(struct __call_single_data, func)); - BUILD_BUG_ON(offsetof(struct irq_work, flags) != - offsetof(struct __call_single_data, flags)); - - /* - * Assert the CSD_TYPE_TTWU layout is similar enough - * for task_struct to be on the @call_single_queue. - */ - BUILD_BUG_ON(offsetof(struct task_struct, wake_entry_type) - offsetof(struct task_struct, wake_entry) != - offsetof(struct __call_single_data, flags) - offsetof(struct __call_single_data, llist)); - idle_threads_init(); cpuhp_threads_init(); diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 5773f0ba7e76..5ef0484513ec 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -3,6 +3,9 @@ * Copyright (C) 2006 Jens Axboe * */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -344,7 +347,8 @@ static int __blk_trace_remove(struct request_queue *q) { struct blk_trace *bt; - bt = xchg(&q->blk_trace, NULL); + bt = rcu_replace_pointer(q->blk_trace, NULL, + lockdep_is_held(&q->blk_trace_mutex)); if (!bt) return -EINVAL; @@ -494,6 +498,17 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, */ strreplace(buts->name, '/', '_'); + /* + * bdev can be NULL, as with scsi-generic, this is a helpful as + * we can be. 
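/*
 * Editor's sketch (not part of the patch): the __blk_trace_remove() hunk
 * above, together with the setup changes that follow, move the publication
 * of q->blk_trace from cmpxchg() to a check under blk_trace_mutex plus
 * rcu_assign_pointer(), returning -EBUSY when a trace already exists. A
 * simplified userspace analogue using a mutex and a release store in place
 * of RCU; all names here are invented.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <errno.h>

struct fake_trace { int enabled; };

static pthread_mutex_t setup_mutex = PTHREAD_MUTEX_INITIALIZER;
static _Atomic(struct fake_trace *) active_trace;

static int fake_trace_setup(void)
{
	struct fake_trace *bt;
	int ret = 0;

	pthread_mutex_lock(&setup_mutex);
	/* only one trace may be set up at a time */
	if (atomic_load_explicit(&active_trace, memory_order_relaxed)) {
		ret = -EBUSY;
		goto out;
	}
	bt = calloc(1, sizeof(*bt));
	if (!bt) {
		ret = -ENOMEM;
		goto out;
	}
	bt->enabled = 1;
	/* publish only after the structure is fully initialized */
	atomic_store_explicit(&active_trace, bt, memory_order_release);
out:
	pthread_mutex_unlock(&setup_mutex);
	return ret;
}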
+ */ + if (rcu_dereference_protected(q->blk_trace, + lockdep_is_held(&q->blk_trace_mutex))) { + pr_warn("Concurrent blktraces are not allowed on %s\n", + buts->name); + return -EBUSY; + } + bt = kzalloc(sizeof(*bt), GFP_KERNEL); if (!bt) return -ENOMEM; @@ -543,10 +558,7 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, bt->pid = buts->pid; bt->trace_state = Blktrace_setup; - ret = -EBUSY; - if (cmpxchg(&q->blk_trace, NULL, bt)) - goto err; - + rcu_assign_pointer(q->blk_trace, bt); get_probe_ref(); ret = 0; @@ -1629,7 +1641,8 @@ static int blk_trace_remove_queue(struct request_queue *q) { struct blk_trace *bt; - bt = xchg(&q->blk_trace, NULL); + bt = rcu_replace_pointer(q->blk_trace, NULL, + lockdep_is_held(&q->blk_trace_mutex)); if (bt == NULL) return -EINVAL; @@ -1661,10 +1674,7 @@ static int blk_trace_setup_queue(struct request_queue *q, blk_trace_setup_lba(bt, bdev); - ret = -EBUSY; - if (cmpxchg(&q->blk_trace, NULL, bt)) - goto free_bt; - + rcu_assign_pointer(q->blk_trace, bt); get_probe_ref(); return 0; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index e729c9e587a0..7bc3d6175868 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -141,7 +141,7 @@ bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr) { int ret; - ret = probe_user_read(dst, unsafe_ptr, size); + ret = copy_from_user_nofault(dst, unsafe_ptr, size); if (unlikely(ret < 0)) memset(dst, 0, size); return ret; @@ -196,7 +196,7 @@ bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) if (unlikely(ret < 0)) goto fail; - ret = probe_kernel_read(dst, unsafe_ptr, size); + ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); if (unlikely(ret < 0)) goto fail; return ret; @@ -241,7 +241,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) if (unlikely(ret < 0)) goto fail; - return 0; + return ret; fail: memset(dst, 0, size); return ret; @@ -326,7 +326,7 @@ BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src, if (unlikely(!nmi_uaccess_okay())) return -EPERM; - return probe_user_write(unsafe_ptr, src, size); + return copy_to_user_nofault(unsafe_ptr, src, size); } static const struct bpf_func_proto bpf_probe_write_user_proto = { @@ -661,7 +661,7 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, copy_size = (fmt[i + 2] == '4') ? 
4 : 16; - err = probe_kernel_read(bufs->buf[memcpy_cnt], + err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt], (void *) (long) args[fmt_cnt], copy_size); if (err < 0) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index c163c3531faf..1903b80db6eb 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2260,7 +2260,7 @@ ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, if (hash_contains_ip(ip, op->func_hash)) return op; - } + } return NULL; } @@ -3599,7 +3599,7 @@ static int t_show(struct seq_file *m, void *v) if (direct) seq_printf(m, "\n\tdirect-->%pS", (void *)direct); } - } + } seq_putc(m, '\n'); @@ -7151,6 +7151,10 @@ static int pid_open(struct inode *inode, struct file *file, int type) case TRACE_NO_PIDS: seq_ops = &ftrace_no_pid_sops; break; + default: + trace_array_put(tr); + WARN_ON_ONCE(1); + return -EINVAL; } ret = seq_open(file, seq_ops); @@ -7229,6 +7233,10 @@ pid_write(struct file *filp, const char __user *ubuf, other_pids = rcu_dereference_protected(tr->function_pids, lockdep_is_held(&ftrace_lock)); break; + default: + ret = -EINVAL; + WARN_ON_ONCE(1); + goto out; } ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index b8e1ca48be50..00867ff82412 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2427,7 +2427,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, if (unlikely(info->add_timestamp)) { bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer); - event = rb_add_time_stamp(event, info->delta, abs); + event = rb_add_time_stamp(event, abs ? info->delta : delta, abs); length -= RB_LEN_TIME_EXTEND; delta = 0; } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ec44b0e2a19c..bb62269724d5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3570,7 +3570,6 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) void tracing_iter_reset(struct trace_iterator *iter, int cpu) { - struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter; unsigned long entries = 0; u64 ts; @@ -3588,7 +3587,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu) * that a reset never took place on a cpu. This is evident * by the timestamp being before the start of the buffer. 
*/ - while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { + while (ring_buffer_iter_peek(buf_iter, &ts)) { if (ts >= iter->array_buffer->time_start) break; entries++; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index def769df5bf1..13db4000af3f 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -61,6 +61,9 @@ enum trace_type { #undef __field_desc #define __field_desc(type, container, item) +#undef __field_packed +#define __field_packed(type, container, item) + #undef __array #define __array(type, item, size) type item[size]; diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 9de29bb45a27..fa0fc08c6ef8 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c @@ -101,12 +101,16 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event) kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN); ret = kprobe_event_gen_cmd_start(&cmd, event, val); - if (ret) + if (ret) { + pr_err("Failed to generate probe: %s\n", buf); break; + } ret = kprobe_event_gen_cmd_end(&cmd); - if (ret) + if (ret) { pr_err("Failed to add probe: %s\n", buf); + break; + } } return ret; @@ -120,7 +124,7 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event) } #endif -#ifdef CONFIG_HIST_TRIGGERS +#ifdef CONFIG_SYNTH_EVENTS static int __init trace_boot_add_synth_event(struct xbc_node *node, const char *event) { diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index a523da0dae0a..18c4a58aff79 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -78,8 +78,8 @@ FTRACE_ENTRY_PACKED(funcgraph_entry, ftrace_graph_ent_entry, F_STRUCT( __field_struct( struct ftrace_graph_ent, graph_ent ) - __field_desc( unsigned long, graph_ent, func ) - __field_desc( int, graph_ent, depth ) + __field_packed( unsigned long, graph_ent, func ) + __field_packed( int, graph_ent, depth ) ), F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth) @@ -92,11 +92,11 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry, F_STRUCT( __field_struct( struct ftrace_graph_ret, ret ) - __field_desc( unsigned long, ret, func ) - __field_desc( unsigned long, ret, overrun ) - __field_desc( unsigned long long, ret, calltime) - __field_desc( unsigned long long, ret, rettime ) - __field_desc( int, ret, depth ) + __field_packed( unsigned long, ret, func ) + __field_packed( unsigned long, ret, overrun ) + __field_packed( unsigned long long, ret, calltime) + __field_packed( unsigned long long, ret, rettime ) + __field_packed( int, ret, depth ) ), F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d", diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 3a74736da363..f725802160c0 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -216,11 +216,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file) int trigger_process_regex(struct trace_event_file *file, char *buff) { - char *command, *next = buff; + char *command, *next; struct event_command *p; int ret = -EINVAL; + next = buff = skip_spaces(buff); command = strsep(&next, ": \t"); + if (next) { + next = skip_spaces(next); + if (!*next) + next = NULL; + } command = (command[0] != '!') ? 
command : command + 1; mutex_lock(&trigger_cmd_mutex); @@ -630,8 +636,14 @@ event_trigger_callback(struct event_command *cmd_ops, int ret; /* separate the trigger from the filter (t:n [if filter]) */ - if (param && isdigit(param[0])) + if (param && isdigit(param[0])) { trigger = strsep(¶m, " \t"); + if (param) { + param = skip_spaces(param); + if (!*param) + param = NULL; + } + } trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger); @@ -1368,6 +1380,11 @@ int event_enable_trigger_func(struct event_command *cmd_ops, trigger = strsep(¶m, " \t"); if (!trigger) return -EINVAL; + if (param) { + param = skip_spaces(param); + if (!*param) + param = NULL; + } system = strsep(&trigger, ":"); if (!trigger) diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 77ce5a3b6773..70d3d0a09053 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -45,6 +45,9 @@ static int ftrace_event_register(struct trace_event_call *call, #undef __field_desc #define __field_desc(type, container, item) type item; +#undef __field_packed +#define __field_packed(type, container, item) type item; + #undef __array #define __array(type, item, size) type item[size]; @@ -85,6 +88,13 @@ static void __always_unused ____ftrace_check_##name(void) \ .size = sizeof(_type), .align = __alignof__(_type), \ is_signed_type(_type), .filter_type = _filter_type }, + +#undef __field_ext_packed +#define __field_ext_packed(_type, _item, _filter_type) { \ + .type = #_type, .name = #_item, \ + .size = sizeof(_type), .align = 1, \ + is_signed_type(_type), .filter_type = _filter_type }, + #undef __field #define __field(_type, _item) __field_ext(_type, _item, FILTER_OTHER) @@ -94,6 +104,9 @@ static void __always_unused ____ftrace_check_##name(void) \ #undef __field_desc #define __field_desc(_type, _container, _item) __field_ext(_type, _item, FILTER_OTHER) +#undef __field_packed +#define __field_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER) + #undef __array #define __array(_type, _item, _len) { \ .type = #_type"["__stringify(_len)"]", .name = #_item, \ @@ -129,6 +142,9 @@ static struct trace_event_fields ftrace_event_fields_##name[] = { \ #undef __field_desc #define __field_desc(type, container, item) +#undef __field_packed +#define __field_packed(type, container, item) + #undef __array #define __array(type, item, len) diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 8a4c8d5c2c98..dd4dff71d89a 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -42,7 +42,7 @@ static int allocate_ftrace_ops(struct trace_array *tr) if (!ops) return -ENOMEM; - /* Currently only the non stack verision is supported */ + /* Currently only the non stack version is supported */ ops->func = function_trace_call; ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 6048f1be26d2..aefb6065b508 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1222,7 +1222,7 @@ fetch_store_strlen(unsigned long addr) #endif do { - ret = probe_kernel_read(&c, (u8 *)addr + len, 1); + ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1); len++; } while (c && ret == 0 && len < MAX_STRING_SIZE); @@ -1290,7 +1290,7 @@ probe_mem_read_user(void *dest, void *src, size_t size) { const void __user *uaddr = (__force const void __user *)src; - return probe_user_read(dest, uaddr, size); + return copy_from_user_nofault(dest, uaddr, size); } static 
nokprobe_inline int @@ -1300,7 +1300,7 @@ probe_mem_read(void *dest, void *src, size_t size) if ((unsigned long)src < TASK_SIZE) return probe_mem_read_user(dest, src, size); #endif - return probe_kernel_read(dest, src, size); + return copy_from_kernel_nofault(dest, src, size); } /* Note that we don't verify it, since the code does not come from user space */ diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index b8a928e925c7..d2867ccc6aca 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -639,8 +639,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, ret = -EINVAL; goto fail; } - if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM) || - parg->count) { + if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM || + code->op == FETCH_OP_DATA) || parg->count) { /* * IMM, DATA and COMM is pointing actual address, those * must be kept, and if parg->count != 0, this is an diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index a0ff9e200ef6..a22b62813f8c 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -236,7 +236,7 @@ struct trace_probe_event { struct trace_event_call call; struct list_head files; struct list_head probes; - struct trace_uprobe_filter filter[0]; + struct trace_uprobe_filter filter[]; }; struct trace_probe { diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 9fbe1e237563..c41c3c17b86a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4638,11 +4638,11 @@ void print_worker_info(const char *log_lvl, struct task_struct *task) * Carefully copy the associated workqueue's workfn, name and desc. * Keep the original last '\0' in case the original is garbage. */ - probe_kernel_read(&fn, &worker->current_func, sizeof(fn)); - probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq)); - probe_kernel_read(&wq, &pwq->wq, sizeof(wq)); - probe_kernel_read(name, wq->name, sizeof(name) - 1); - probe_kernel_read(desc, worker->desc, sizeof(desc) - 1); + copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn)); + copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); + copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); + copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1); + copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1); if (fn || name[0] || desc[0]) { printk("%sWorkqueue: %s %ps", log_lvl, name, fn); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index d74ac0fd6b2d..9ad9210d70a1 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -229,7 +229,6 @@ config DEBUG_INFO_COMPRESSED bool "Compressed debugging information" depends on DEBUG_INFO depends on $(cc-option,-gz=zlib) - depends on $(as-option,-Wa$(comma)--compress-debug-sections=zlib) depends on $(ld-option,--compress-debug-sections=zlib) help Compress the debug information using zlib. 
Requires GCC 5.0+ or Clang diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 81f5464ea9e1..34b84bcbd3d9 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -15,11 +15,15 @@ config CC_HAS_KASAN_GENERIC config CC_HAS_KASAN_SW_TAGS def_bool $(cc-option, -fsanitize=kernel-hwaddress) +config CC_HAS_WORKING_NOSANITIZE_ADDRESS + def_bool !CC_IS_GCC || GCC_VERSION >= 80300 + config KASAN bool "KASAN: runtime memory debugger" depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \ (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS) depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) + depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS help Enables KASAN (KernelAddressSANitizer) - runtime memory debugger, designed to find out-of-bounds accesses and use-after-free bugs. diff --git a/lib/seq_buf.c b/lib/seq_buf.c index 4e865d42ab03..707453f5d58e 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c @@ -91,6 +91,7 @@ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...) return ret; } +EXPORT_SYMBOL_GPL(seq_buf_printf); #ifdef CONFIG_BINARY_PRINTF /** diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 28528285942c..a2a82262b97b 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -520,8 +520,7 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice, err_free: kfree(devmem); err_release: - release_mem_region(devmem->pagemap.res.start, - resource_size(&devmem->pagemap.res)); + release_mem_region(res->start, resource_size(res)); err: mutex_unlock(&mdevice->devmem_lock); return false; diff --git a/lib/test_lockup.c b/lib/test_lockup.c index f258743a0d83..bd7c7ff39f6b 100644 --- a/lib/test_lockup.c +++ b/lib/test_lockup.c @@ -419,8 +419,8 @@ static bool test_kernel_ptr(unsigned long addr, int size) /* should be at least readable kernel address */ if (access_ok(ptr, 1) || access_ok(ptr + size - 1, 1) || - probe_kernel_address(ptr, buf) || - probe_kernel_address(ptr + size - 1, buf)) { + get_kernel_nofault(buf, ptr) || + get_kernel_nofault(buf, ptr + size - 1)) { pr_err("invalid kernel ptr: %#lx\n", addr); return true; } @@ -437,7 +437,7 @@ static bool __maybe_unused test_magic(unsigned long addr, int offset, if (!addr) return false; - if (probe_kernel_address(ptr, magic) || magic != expected) { + if (get_kernel_nofault(magic, ptr) || magic != expected) { pr_err("invalid magic at %#lx + %#x = %#x, expected %#x\n", addr, offset, magic, expected); return true; diff --git a/lib/test_objagg.c b/lib/test_objagg.c index 72c1abfa154d..da137939a410 100644 --- a/lib/test_objagg.c +++ b/lib/test_objagg.c @@ -979,10 +979,10 @@ static int test_hints_case(const struct hints_case *hints_case) err_world2_obj_get: for (i--; i >= 0; i--) world_obj_put(&world2, objagg, hints_case->key_ids[i]); - objagg_hints_put(hints); - objagg_destroy(objagg2); i = hints_case->key_ids_count; + objagg_destroy(objagg2); err_check_expect_hints_stats: + objagg_hints_put(hints); err_hints_get: err_check_expect_stats: err_world_obj_get: diff --git a/mm/compaction.c b/mm/compaction.c index fd988b7e5f2b..86375605faa9 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2316,15 +2316,26 @@ static enum compact_result compact_zone_order(struct zone *zone, int order, .page = NULL, }; - current->capture_control = &capc; + /* + * Make sure the structs are really initialized before we expose the + * capture control, in case we are interrupted and the interrupt handler + * frees a page. 
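/*
 * Editor's sketch (not part of the patch): the compact_zone_order() change
 * here (the comment above plus the barrier()/WRITE_ONCE() lines that
 * follow) makes sure the capture_control structure is fully initialized
 * before it becomes visible to a same-CPU interrupt, and is hidden again
 * before the captured page is read. Because reader and writer share a CPU,
 * a compiler barrier is enough; atomic_signal_fence() is the closest
 * portable analogue. All names below are invented.
 */
#include <stdatomic.h>
#include <stddef.h>

struct capture {
	int order;
	void *page;		/* filled in by the "interrupt" side */
};

static struct capture *volatile current_capture;	/* read by a signal handler */

static void *capture_one(int order)
{
	struct capture capc = { .order = order, .page = NULL };
	void *page;

	/* make sure capc is initialized before it becomes visible */
	atomic_signal_fence(memory_order_seq_cst);
	current_capture = &capc;

	/* ... work during which a handler may fill in capc.page ... */

	/* hide the capture first, then read what was captured */
	current_capture = NULL;
	atomic_signal_fence(memory_order_seq_cst);
	page = capc.page;
	return page;
}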
+ */ + barrier(); + WRITE_ONCE(current->capture_control, &capc); ret = compact_zone(&cc, &capc); VM_BUG_ON(!list_empty(&cc.freepages)); VM_BUG_ON(!list_empty(&cc.migratepages)); - *capture = capc.page; - current->capture_control = NULL; + /* + * Make sure we hide capture control first before we read the captured + * page pointer, otherwise an interrupt could free and capture a page + * and we would leak it. + */ + WRITE_ONCE(current->capture_control, NULL); + *capture = READ_ONCE(capc.page); return ret; } diff --git a/mm/debug.c b/mm/debug.c index b5b1de8c71ac..4f376514744d 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -120,9 +120,9 @@ void __dump_page(struct page *page, const char *reason) * mapping can be invalid pointer and we don't want to crash * accessing it, so probe everything depending on it carefully */ - if (probe_kernel_read(&host, &mapping->host, + if (copy_from_kernel_nofault(&host, &mapping->host, sizeof(struct inode *)) || - probe_kernel_read(&a_ops, &mapping->a_ops, + copy_from_kernel_nofault(&a_ops, &mapping->a_ops, sizeof(struct address_space_operations *))) { pr_warn("failed to read mapping->host or a_ops, mapping not a valid kernel address?\n"); goto out_mapping; @@ -133,7 +133,7 @@ void __dump_page(struct page *page, const char *reason) goto out_mapping; } - if (probe_kernel_read(&dentry_first, + if (copy_from_kernel_nofault(&dentry_first, &host->i_dentry.first, sizeof(struct hlist_node *))) { pr_warn("mapping->a_ops:%ps with invalid mapping->host inode address %px\n", a_ops, host); @@ -146,7 +146,7 @@ void __dump_page(struct page *page, const char *reason) } dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias); - if (probe_kernel_read(&dentry, dentry_ptr, + if (copy_from_kernel_nofault(&dentry, dentry_ptr, sizeof(struct dentry))) { pr_warn("mapping->aops:%ps with invalid mapping->host->i_dentry.first %px\n", a_ops, dentry_ptr); diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c index e45623016aea..61ab16fb2e36 100644 --- a/mm/debug_vm_pgtable.c +++ b/mm/debug_vm_pgtable.c @@ -246,13 +246,13 @@ static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp, static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep, unsigned long vaddr) { - pte_t pte = READ_ONCE(*ptep); + pte_t pte = ptep_get(ptep); pte = __pte(pte_val(pte) | RANDOM_ORVALUE); set_pte_at(mm, vaddr, ptep, pte); barrier(); pte_clear(mm, vaddr, ptep); - pte = READ_ONCE(*ptep); + pte = ptep_get(ptep); WARN_ON(!pte_none(pte)); } diff --git a/mm/gup.c b/mm/gup.c index de9e36262ccb..6f47697f8fb0 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2196,7 +2196,7 @@ static inline pte_t gup_get_pte(pte_t *ptep) */ static inline pte_t gup_get_pte(pte_t *ptep) { - return READ_ONCE(*ptep); + return ptep_get(ptep); } #endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */ @@ -2425,7 +2425,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, if (pte_end < end) end = pte_end; - pte = READ_ONCE(*ptep); + pte = huge_ptep_get(ptep); if (!pte_access_permitted(pte, flags & FOLL_WRITE)) return 0; diff --git a/mm/maccess.c b/mm/maccess.c index 88845eda5047..f98ff91e32c6 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -6,14 +6,15 @@ #include #include -bool __weak probe_kernel_read_allowed(const void *unsafe_src, size_t size) +bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src, + size_t size) { return true; } #ifdef HAVE_GET_KERNEL_NOFAULT -#define probe_kernel_read_loop(dst, src, len, type, err_label) \ +#define copy_from_kernel_nofault_loop(dst, src, len, type, 
err_label) \ while (len >= sizeof(type)) { \ __get_kernel_nofault(dst, src, type, err_label); \ dst += sizeof(type); \ @@ -21,25 +22,25 @@ bool __weak probe_kernel_read_allowed(const void *unsafe_src, size_t size) len -= sizeof(type); \ } -long probe_kernel_read(void *dst, const void *src, size_t size) +long copy_from_kernel_nofault(void *dst, const void *src, size_t size) { - if (!probe_kernel_read_allowed(src, size)) + if (!copy_from_kernel_nofault_allowed(src, size)) return -ERANGE; pagefault_disable(); - probe_kernel_read_loop(dst, src, size, u64, Efault); - probe_kernel_read_loop(dst, src, size, u32, Efault); - probe_kernel_read_loop(dst, src, size, u16, Efault); - probe_kernel_read_loop(dst, src, size, u8, Efault); + copy_from_kernel_nofault_loop(dst, src, size, u64, Efault); + copy_from_kernel_nofault_loop(dst, src, size, u32, Efault); + copy_from_kernel_nofault_loop(dst, src, size, u16, Efault); + copy_from_kernel_nofault_loop(dst, src, size, u8, Efault); pagefault_enable(); return 0; Efault: pagefault_enable(); return -EFAULT; } -EXPORT_SYMBOL_GPL(probe_kernel_read); +EXPORT_SYMBOL_GPL(copy_from_kernel_nofault); -#define probe_kernel_write_loop(dst, src, len, type, err_label) \ +#define copy_to_kernel_nofault_loop(dst, src, len, type, err_label) \ while (len >= sizeof(type)) { \ __put_kernel_nofault(dst, src, type, err_label); \ dst += sizeof(type); \ @@ -47,13 +48,13 @@ EXPORT_SYMBOL_GPL(probe_kernel_read); len -= sizeof(type); \ } -long probe_kernel_write(void *dst, const void *src, size_t size) +long copy_to_kernel_nofault(void *dst, const void *src, size_t size) { pagefault_disable(); - probe_kernel_write_loop(dst, src, size, u64, Efault); - probe_kernel_write_loop(dst, src, size, u32, Efault); - probe_kernel_write_loop(dst, src, size, u16, Efault); - probe_kernel_write_loop(dst, src, size, u8, Efault); + copy_to_kernel_nofault_loop(dst, src, size, u64, Efault); + copy_to_kernel_nofault_loop(dst, src, size, u32, Efault); + copy_to_kernel_nofault_loop(dst, src, size, u16, Efault); + copy_to_kernel_nofault_loop(dst, src, size, u8, Efault); pagefault_enable(); return 0; Efault: @@ -67,7 +68,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) if (unlikely(count <= 0)) return 0; - if (!probe_kernel_read_allowed(unsafe_addr, count)) + if (!copy_from_kernel_nofault_allowed(unsafe_addr, count)) return -ERANGE; pagefault_disable(); @@ -87,7 +88,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) } #else /* HAVE_GET_KERNEL_NOFAULT */ /** - * probe_kernel_read(): safely attempt to read from kernel-space + * copy_from_kernel_nofault(): safely attempt to read from kernel-space * @dst: pointer to the buffer that shall take the data * @src: address to read from * @size: size of the data chunk @@ -98,15 +99,15 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) * * We ensure that the copy_from_user is executed in atomic context so that * do_page_fault() doesn't attempt to take mmap_lock. This makes - * probe_kernel_read() suitable for use within regions where the caller + * copy_from_kernel_nofault() suitable for use within regions where the caller * already holds mmap_lock, or other locks which nest inside mmap_lock. 
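/*
 * Editor's sketch (not part of the patch): the copy_from_kernel_nofault()
 * rework above drains the buffer in chunks of descending size (u64, then
 * u32, u16, u8), so each access is a single machine-word transfer that the
 * kernel's __get_kernel_nofault()/__put_kernel_nofault() can let fault
 * cleanly. The same loop structure in plain C, without the fault handling;
 * helper names are invented and memcpy() stands in for the per-word access.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define copy_loop(dst, src, len, type)				\
	while ((len) >= sizeof(type)) {				\
		memcpy((dst), (src), sizeof(type));		\
		(dst) += sizeof(type);				\
		(src) += sizeof(type);				\
		(len) -= sizeof(type);				\
	}

static void copy_in_chunks(void *dstp, const void *srcp, size_t size)
{
	unsigned char *dst = dstp;
	const unsigned char *src = srcp;

	copy_loop(dst, src, size, uint64_t);
	copy_loop(dst, src, size, uint32_t);
	copy_loop(dst, src, size, uint16_t);
	copy_loop(dst, src, size, uint8_t);
}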
*/ -long probe_kernel_read(void *dst, const void *src, size_t size) +long copy_from_kernel_nofault(void *dst, const void *src, size_t size) { long ret; mm_segment_t old_fs = get_fs(); - if (!probe_kernel_read_allowed(src, size)) + if (!copy_from_kernel_nofault_allowed(src, size)) return -ERANGE; set_fs(KERNEL_DS); @@ -120,10 +121,10 @@ long probe_kernel_read(void *dst, const void *src, size_t size) return -EFAULT; return 0; } -EXPORT_SYMBOL_GPL(probe_kernel_read); +EXPORT_SYMBOL_GPL(copy_from_kernel_nofault); /** - * probe_kernel_write(): safely attempt to write to a location + * copy_to_kernel_nofault(): safely attempt to write to a location * @dst: address to write to * @src: pointer to the data that shall be written * @size: size of the data chunk @@ -131,7 +132,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read); * Safely write to address @dst from the buffer at @src. If a kernel fault * happens, handle that and return -EFAULT. */ -long probe_kernel_write(void *dst, const void *src, size_t size) +long copy_to_kernel_nofault(void *dst, const void *src, size_t size) { long ret; mm_segment_t old_fs = get_fs(); @@ -174,7 +175,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) if (unlikely(count <= 0)) return 0; - if (!probe_kernel_read_allowed(unsafe_addr, count)) + if (!copy_from_kernel_nofault_allowed(unsafe_addr, count)) return -ERANGE; set_fs(KERNEL_DS); @@ -193,7 +194,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) #endif /* HAVE_GET_KERNEL_NOFAULT */ /** - * probe_user_read(): safely attempt to read from a user-space location + * copy_from_user_nofault(): safely attempt to read from a user-space location * @dst: pointer to the buffer that shall take the data * @src: address to read from. This must be a user address. * @size: size of the data chunk @@ -201,7 +202,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) * Safely read from user address @src to the buffer at @dst. If a kernel fault * happens, handle that and return -EFAULT. */ -long probe_user_read(void *dst, const void __user *src, size_t size) +long copy_from_user_nofault(void *dst, const void __user *src, size_t size) { long ret = -EFAULT; mm_segment_t old_fs = get_fs(); @@ -218,10 +219,10 @@ long probe_user_read(void *dst, const void __user *src, size_t size) return -EFAULT; return 0; } -EXPORT_SYMBOL_GPL(probe_user_read); +EXPORT_SYMBOL_GPL(copy_from_user_nofault); /** - * probe_user_write(): safely attempt to write to a user-space location + * copy_to_user_nofault(): safely attempt to write to a user-space location * @dst: address to write to * @src: pointer to the data that shall be written * @size: size of the data chunk @@ -229,7 +230,7 @@ EXPORT_SYMBOL_GPL(probe_user_read); * Safely write to address @dst from the buffer at @src. If a kernel fault * happens, handle that and return -EFAULT. 
*/ -long probe_user_write(void __user *dst, const void *src, size_t size) +long copy_to_user_nofault(void __user *dst, const void *src, size_t size) { long ret = -EFAULT; mm_segment_t old_fs = get_fs(); @@ -246,7 +247,7 @@ long probe_user_write(void __user *dst, const void *src, size_t size) return -EFAULT; return 0; } -EXPORT_SYMBOL_GPL(probe_user_write); +EXPORT_SYMBOL_GPL(copy_to_user_nofault); /** * strncpy_from_user_nofault: - Copy a NUL terminated string from unsafe user diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0b38b6ad547d..19622328e4b5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2772,8 +2772,10 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, return; cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); - if (!cw) + if (!cw) { + css_put(&memcg->css); return; + } cw->memcg = memcg; cw->cachep = cachep; @@ -6360,11 +6362,16 @@ static unsigned long effective_protection(unsigned long usage, * We're using unprotected memory for the weight so that if * some cgroups DO claim explicit protection, we don't protect * the same bytes twice. + * + * Check both usage and parent_usage against the respective + * protected values. One should imply the other, but they + * aren't read atomically - make sure the division is sane. */ if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT)) return ep; - - if (parent_effective > siblings_protected && usage > protected) { + if (parent_effective > siblings_protected && + parent_usage > siblings_protected && + usage > protected) { unsigned long unclaimed; unclaimed = parent_effective - siblings_protected; @@ -6416,7 +6423,7 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, if (parent == root) { memcg->memory.emin = READ_ONCE(memcg->memory.min); - memcg->memory.elow = memcg->memory.low; + memcg->memory.elow = READ_ONCE(memcg->memory.low); goto out; } @@ -6428,7 +6435,8 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, atomic_long_read(&parent->memory.children_min_usage))); WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, - memcg->memory.low, READ_ONCE(parent->memory.elow), + READ_ONCE(memcg->memory.low), + READ_ONCE(parent->memory.elow), atomic_long_read(&parent->memory.children_low_usage))); out: diff --git a/mm/memory.c b/mm/memory.c index dc7f3543b1fd..87ec87cdc1ff 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1498,7 +1498,7 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr, } #ifdef pte_index -static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd, +static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte, unsigned long addr, struct page *page, pgprot_t prot) { int err; @@ -1506,8 +1506,9 @@ static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd, if (!page_count(page)) return -EINVAL; err = validate_page_before_insert(page); - return err ? 
err : insert_page_into_pte_locked( - mm, pte_offset_map(pmd, addr), addr, page, prot); + if (err) + return err; + return insert_page_into_pte_locked(mm, pte, addr, page, prot); } /* insert_pages() amortizes the cost of spinlock operations @@ -1517,7 +1518,8 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num, pgprot_t prot) { pmd_t *pmd = NULL; - spinlock_t *pte_lock = NULL; + pte_t *start_pte, *pte; + spinlock_t *pte_lock; struct mm_struct *const mm = vma->vm_mm; unsigned long curr_page_idx = 0; unsigned long remaining_pages_total = *num; @@ -1536,18 +1538,17 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr, ret = -ENOMEM; if (pte_alloc(mm, pmd)) goto out; - pte_lock = pte_lockptr(mm, pmd); while (pages_to_write_in_pmd) { int pte_idx = 0; const int batch_size = min_t(int, pages_to_write_in_pmd, 8); - spin_lock(pte_lock); - for (; pte_idx < batch_size; ++pte_idx) { - int err = insert_page_in_batch_locked(mm, pmd, + start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); + for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { + int err = insert_page_in_batch_locked(mm, pte, addr, pages[curr_page_idx], prot); if (unlikely(err)) { - spin_unlock(pte_lock); + pte_unmap_unlock(start_pte, pte_lock); ret = err; remaining_pages_total -= pte_idx; goto out; @@ -1555,7 +1556,7 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr, addr += PAGE_SIZE; ++curr_page_idx; } - spin_unlock(pte_lock); + pte_unmap_unlock(start_pte, pte_lock); pages_to_write_in_pmd -= batch_size; remaining_pages_total -= batch_size; } @@ -3140,8 +3141,18 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) err = mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL); ClearPageSwapCache(page); - if (err) + if (err) { + ret = VM_FAULT_OOM; goto out_page; + } + + /* + * XXX: Move to lru_cache_add() when it + * supports new vs putback + */ + spin_lock_irq(&page_pgdat(page)->lru_lock); + lru_note_cost_page(page); + spin_unlock_irq(&page_pgdat(page)->lru_lock); lru_cache_add(page); swap_readpage(page, true); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9b34e03e730a..da374cd3d45b 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -471,11 +471,20 @@ void __ref remove_pfn_range_from_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages) { + const unsigned long end_pfn = start_pfn + nr_pages; struct pglist_data *pgdat = zone->zone_pgdat; - unsigned long flags; + unsigned long pfn, cur_nr_pages, flags; /* Poison struct pages because they are now uninitialized again. */ - page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages); + for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) { + cond_resched(); + + /* Select all remaining pages up to the next section boundary */ + cur_nr_pages = + min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn); + page_init_poison(pfn_to_page(pfn), + sizeof(struct page) * cur_nr_pages); + } #ifdef CONFIG_ZONE_DEVICE /* diff --git a/mm/nommu.c b/mm/nommu.c index cdcad5d61dd1..f32a69095d50 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -290,23 +290,6 @@ void *vzalloc_node(unsigned long size, int node) } EXPORT_SYMBOL(vzalloc_node); -/** - * vmalloc_exec - allocate virtually contiguous, executable memory - * @size: allocation size - * - * Kernel-internal function to allocate enough pages to cover @size - * the page level allocator and map them into contiguous and - * executable kernel virtual space. 
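/*
 * Editor's sketch (not part of the patch): the insert_pages() hunk above
 * amortizes the PTE lock by mapping pages in batches of up to 8 under a
 * single pte_offset_map_lock()/pte_unmap_unlock() pair. The same batching
 * shape in plain C, with a pthread mutex standing in for the page-table
 * lock; all names are invented.
 */
#include <pthread.h>
#include <stddef.h>

#define BATCH_MAX 8

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int insert_one(void *page)
{
	(void)page;		/* real code would fill in a table entry */
	return 0;
}

static int insert_batched(void **pages, size_t count)
{
	size_t done = 0;

	while (done < count) {
		size_t batch = count - done;

		if (batch > BATCH_MAX)
			batch = BATCH_MAX;

		pthread_mutex_lock(&table_lock);	/* one lock per batch */
		for (size_t i = 0; i < batch; i++) {
			int err = insert_one(pages[done + i]);

			if (err) {
				pthread_mutex_unlock(&table_lock);
				return err;
			}
		}
		pthread_mutex_unlock(&table_lock);
		done += batch;
	}
	return 0;
}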
- * - * For tight control over page level allocator and protection flags - * use __vmalloc() instead. - */ - -void *vmalloc_exec(unsigned long size) -{ - return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM); -} - /** * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) * @size: allocation size diff --git a/mm/rodata_test.c b/mm/rodata_test.c index 5e313fa93276..2a99df7beeb3 100644 --- a/mm/rodata_test.c +++ b/mm/rodata_test.c @@ -25,7 +25,7 @@ void rodata_test(void) } /* test 2: write to the variable; this should fault */ - if (!probe_kernel_write((void *)&rodata_test_data, + if (!copy_to_kernel_nofault((void *)&rodata_test_data, (void *)&zero, sizeof(zero))) { pr_err("test data was not read only\n"); return; diff --git a/mm/slab.h b/mm/slab.h index 207c83ef6e06..74f7e09a7cfd 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -348,7 +348,7 @@ static __always_inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order, struct kmem_cache *s) { - unsigned int nr_pages = 1 << order; + int nr_pages = 1 << order; struct mem_cgroup *memcg; struct lruvec *lruvec; int ret; @@ -388,7 +388,7 @@ static __always_inline int memcg_charge_slab(struct page *page, static __always_inline void memcg_uncharge_slab(struct page *page, int order, struct kmem_cache *s) { - unsigned int nr_pages = 1 << order; + int nr_pages = 1 << order; struct mem_cgroup *memcg; struct lruvec *lruvec; diff --git a/mm/slab_common.c b/mm/slab_common.c index 9e72ba224175..37d48a56431d 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1726,7 +1726,7 @@ void kzfree(const void *p) if (unlikely(ZERO_OR_NULL_PTR(mem))) return; ks = ksize(mem); - memset(mem, 0, ks); + memzero_explicit(mem, ks); kfree(mem); } EXPORT_SYMBOL(kzfree); diff --git a/mm/slub.c b/mm/slub.c index b8f798b50d44..ef303070d175 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -292,7 +292,7 @@ static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) return get_freepointer(s, object); freepointer_addr = (unsigned long)object + s->offset; - probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p)); + copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p)); return freelist_ptr(s, p, freepointer_addr); } @@ -3766,15 +3766,13 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) } static void list_slab_objects(struct kmem_cache *s, struct page *page, - const char *text, unsigned long *map) + const char *text) { #ifdef CONFIG_SLUB_DEBUG void *addr = page_address(page); + unsigned long *map; void *p; - if (!map) - return; - slab_err(s, page, text, s->name); slab_lock(page); @@ -3786,6 +3784,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, print_tracking(s, p); } } + put_map(map); slab_unlock(page); #endif } @@ -3799,11 +3798,6 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) { LIST_HEAD(discard); struct page *page, *h; - unsigned long *map = NULL; - -#ifdef CONFIG_SLUB_DEBUG - map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL); -#endif BUG_ON(irqs_disabled()); spin_lock_irq(&n->list_lock); @@ -3813,16 +3807,11 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) list_add(&page->slab_list, &discard); } else { list_slab_objects(s, page, - "Objects remaining in %s on __kmem_cache_shutdown()", - map); + "Objects remaining in %s on __kmem_cache_shutdown()"); } } spin_unlock_irq(&n->list_lock); -#ifdef CONFIG_SLUB_DEBUG - bitmap_free(map); -#endif - list_for_each_entry_safe(page, h, &discard, slab_list) discard_slab(s, page); } diff --git 
a/mm/swap.c b/mm/swap.c index dbcab84c6fce..a82efc33411f 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -443,8 +443,7 @@ void mark_page_accessed(struct page *page) else __lru_cache_activate_page(page); ClearPageReferenced(page); - if (page_is_file_lru(page)) - workingset_activation(page); + workingset_activation(page); } if (page_is_idle(page)) clear_page_idle(page); diff --git a/mm/swap_state.c b/mm/swap_state.c index e98ff460e9e9..05889e8e3c97 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -21,7 +21,7 @@ #include #include #include - +#include "internal.h" /* * swapper_space is a fiction, retained to simplify the path through @@ -429,7 +429,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, __SetPageSwapBacked(page); /* May fail (-ENOMEM) if XArray node allocation failed. */ - if (add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL)) { + if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK)) { put_swap_page(page, entry); goto fail_unlock; } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 3091c2ca60df..5a2b55c8dd9a 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1862,7 +1862,6 @@ EXPORT_SYMBOL(vm_unmap_ram); * @pages: an array of pointers to the pages to be mapped * @count: number of pages * @node: prefer to allocate data structures on this node - * @prot: memory protection to use. PAGE_KERNEL for regular RAM * * If you use this function for less than VMAP_MAX_ALLOC pages, it could be * faster than vmap so it's good. But if you mix long-life and short-life @@ -2696,26 +2695,6 @@ void *vzalloc_node(unsigned long size, int node) } EXPORT_SYMBOL(vzalloc_node); -/** - * vmalloc_exec - allocate virtually contiguous, executable memory - * @size: allocation size - * - * Kernel-internal function to allocate enough pages to cover @size - * the page level allocator and map them into contiguous and - * executable kernel virtual space. - * - * For tight control over page level allocator and protection flags - * use __vmalloc() instead. - * - * Return: pointer to the allocated memory or %NULL on error - */ -void *vmalloc_exec(unsigned long size) -{ - return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, - GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, - NUMA_NO_NODE, __builtin_return_address(0)); -} - #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) diff --git a/mm/vmscan.c b/mm/vmscan.c index b6d84326bdf2..749d239c62b2 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -904,6 +904,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, __delete_from_swap_cache(page, swap); xa_unlock_irqrestore(&mapping->i_pages, flags); put_swap_page(page, swap); + workingset_eviction(page, target_memcg); } else { void (*freepage)(struct page *); void *shadow = NULL; @@ -1884,6 +1885,8 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec, list_add(&page->lru, &pages_to_free); } else { nr_moved += nr_pages; + if (PageActive(page)) + workingset_age_nonresident(lruvec, nr_pages); } } diff --git a/mm/workingset.c b/mm/workingset.c index d481ea452eeb..50b7937bab32 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -156,8 +156,8 @@ * * Implementation * - * For each node's file LRU lists, a counter for inactive evictions - * and activations is maintained (node->inactive_age). + * For each node's LRU lists, a counter for inactive evictions and + * activations is maintained (node->nonresident_age). 
* * On eviction, a snapshot of this counter (along with some bits to * identify the node) is stored in the now empty page cache @@ -213,7 +213,17 @@ static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat, *workingsetp = workingset; } -static void advance_inactive_age(struct mem_cgroup *memcg, pg_data_t *pgdat) +/** + * workingset_age_nonresident - age non-resident entries as LRU ages + * @memcg: the lruvec that was aged + * @nr_pages: the number of pages to count + * + * As in-memory pages are aged, non-resident pages need to be aged as + * well, in order for the refault distances later on to be comparable + * to the in-memory dimensions. This function allows reclaim and LRU + * operations to drive the non-resident aging along in parallel. + */ +void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages) { /* * Reclaiming a cgroup means reclaiming all its children in a @@ -227,11 +237,8 @@ static void advance_inactive_age(struct mem_cgroup *memcg, pg_data_t *pgdat) * the root cgroup's, age as well. */ do { - struct lruvec *lruvec; - - lruvec = mem_cgroup_lruvec(memcg, pgdat); - atomic_long_inc(&lruvec->inactive_age); - } while (memcg && (memcg = parent_mem_cgroup(memcg))); + atomic_long_add(nr_pages, &lruvec->nonresident_age); + } while ((lruvec = parent_lruvec(lruvec))); } /** @@ -254,12 +261,11 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg) VM_BUG_ON_PAGE(page_count(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page); - advance_inactive_age(page_memcg(page), pgdat); - lruvec = mem_cgroup_lruvec(target_memcg, pgdat); + workingset_age_nonresident(lruvec, hpage_nr_pages(page)); /* XXX: target_memcg can be NULL, go through lruvec */ memcgid = mem_cgroup_id(lruvec_memcg(lruvec)); - eviction = atomic_long_read(&lruvec->inactive_age); + eviction = atomic_long_read(&lruvec->nonresident_age); return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page)); } @@ -309,20 +315,20 @@ void workingset_refault(struct page *page, void *shadow) if (!mem_cgroup_disabled() && !eviction_memcg) goto out; eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat); - refault = atomic_long_read(&eviction_lruvec->inactive_age); + refault = atomic_long_read(&eviction_lruvec->nonresident_age); /* * Calculate the refault distance * * The unsigned subtraction here gives an accurate distance - * across inactive_age overflows in most cases. There is a + * across nonresident_age overflows in most cases. There is a * special case: usually, shadow entries have a short lifetime * and are either refaulted or reclaimed along with the inode * before they get too old. But it is not impossible for the - * inactive_age to lap a shadow entry in the field, which can - * then result in a false small refault distance, leading to a - * false activation should this old entry actually refault - * again. However, earlier kernels used to deactivate + * nonresident_age to lap a shadow entry in the field, which + * can then result in a false small refault distance, leading + * to a false activation should this old entry actually + * refault again. However, earlier kernels used to deactivate * unconditionally with *every* reclaim invocation for the * longest time, so the occasional inappropriate activation * leading to pressure on the active list is not a problem. 
@@ -359,7 +365,7 @@ void workingset_refault(struct page *page, void *shadow) goto out; SetPageActive(page); - advance_inactive_age(memcg, pgdat); + workingset_age_nonresident(lruvec, hpage_nr_pages(page)); inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE); /* Page was active prior to eviction */ @@ -382,6 +388,7 @@ void workingset_refault(struct page *page, void *shadow) void workingset_activation(struct page *page) { struct mem_cgroup *memcg; + struct lruvec *lruvec; rcu_read_lock(); /* @@ -394,7 +401,8 @@ void workingset_activation(struct page *page) memcg = page_memcg_rcu(page); if (!mem_cgroup_disabled() && !memcg) goto out; - advance_inactive_age(memcg, page_pgdat(page)); + lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page)); + workingset_age_nonresident(lruvec, hpage_nr_pages(page)); out: rcu_read_unlock(); } diff --git a/net/9p/mod.c b/net/9p/mod.c index c1b62428da7b..5126566850bd 100644 --- a/net/9p/mod.c +++ b/net/9p/mod.c @@ -189,3 +189,4 @@ MODULE_AUTHOR("Latchesar Ionkov "); MODULE_AUTHOR("Eric Van Hensbergen "); MODULE_AUTHOR("Ron Minnich "); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Plan 9 Resource Sharing Support (9P2000)"); diff --git a/net/bridge/br_mrp.c b/net/bridge/br_mrp.c index 24986ec7d38c..779e1eb75443 100644 --- a/net/bridge/br_mrp.c +++ b/net/bridge/br_mrp.c @@ -411,10 +411,16 @@ int br_mrp_set_port_role(struct net_bridge_port *p, if (!mrp) return -EINVAL; - if (role == BR_MRP_PORT_ROLE_PRIMARY) + switch (role) { + case BR_MRP_PORT_ROLE_PRIMARY: rcu_assign_pointer(mrp->p_port, p); - else + break; + case BR_MRP_PORT_ROLE_SECONDARY: rcu_assign_pointer(mrp->s_port, p); + break; + default: + return -EINVAL; + } br_mrp_port_switchdev_set_role(p, role); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 7501be4eeba0..2130fe0194e6 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -217,8 +217,8 @@ struct net_bridge_port_group { struct rcu_head rcu; struct timer_list timer; struct br_ip addr; + unsigned char eth_addr[ETH_ALEN] __aligned(2); unsigned char flags; - unsigned char eth_addr[ETH_ALEN]; }; struct net_bridge_mdb_entry { diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c index 7c9e92b2f806..8e8ffac037cd 100644 --- a/net/bridge/netfilter/nft_meta_bridge.c +++ b/net/bridge/netfilter/nft_meta_bridge.c @@ -155,3 +155,4 @@ module_exit(nft_meta_bridge_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("wenxu "); MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "meta"); +MODULE_DESCRIPTION("Support for bridge dedicated meta key"); diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index f48cf4cfb80f..deae2c9a0f69 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -455,3 +455,4 @@ module_exit(nft_reject_bridge_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject"); +MODULE_DESCRIPTION("Reject packets from bridge via nftables"); diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index afe0e8184c23..4e7edd707a14 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -332,6 +332,7 @@ struct ceph_options *ceph_alloc_options(void) opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; opt->osd_request_timeout = CEPH_OSD_REQUEST_TIMEOUT_DEFAULT; + opt->read_from_replica = CEPH_READ_FROM_REPLICA_DEFAULT; return opt; } EXPORT_SYMBOL(ceph_alloc_options); @@ -490,16 +491,13 @@ int 
ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt, case Opt_read_from_replica: switch (result.uint_32) { case Opt_read_from_replica_no: - opt->osd_req_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS | - CEPH_OSD_FLAG_LOCALIZE_READS); + opt->read_from_replica = 0; break; case Opt_read_from_replica_balance: - opt->osd_req_flags |= CEPH_OSD_FLAG_BALANCE_READS; - opt->osd_req_flags &= ~CEPH_OSD_FLAG_LOCALIZE_READS; + opt->read_from_replica = CEPH_OSD_FLAG_BALANCE_READS; break; case Opt_read_from_replica_localize: - opt->osd_req_flags |= CEPH_OSD_FLAG_LOCALIZE_READS; - opt->osd_req_flags &= ~CEPH_OSD_FLAG_BALANCE_READS; + opt->read_from_replica = CEPH_OSD_FLAG_LOCALIZE_READS; break; default: BUG(); @@ -613,9 +611,9 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client, } seq_putc(m, ','); } - if (opt->osd_req_flags & CEPH_OSD_FLAG_BALANCE_READS) { + if (opt->read_from_replica == CEPH_OSD_FLAG_BALANCE_READS) { seq_puts(m, "read_from_replica=balance,"); - } else if (opt->osd_req_flags & CEPH_OSD_FLAG_LOCALIZE_READS) { + } else if (opt->read_from_replica == CEPH_OSD_FLAG_LOCALIZE_READS) { seq_puts(m, "read_from_replica=localize,"); } diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 4fea3c33af2a..2db8b44e70c2 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -445,8 +445,10 @@ static void target_copy(struct ceph_osd_request_target *dest, dest->size = src->size; dest->min_size = src->min_size; dest->sort_bitwise = src->sort_bitwise; + dest->recovery_deletes = src->recovery_deletes; dest->flags = src->flags; + dest->used_replica = src->used_replica; dest->paused = src->paused; dest->epoch = src->epoch; @@ -1117,10 +1119,10 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, truncate_size, truncate_seq); } - req->r_flags = flags; req->r_base_oloc.pool = layout->pool_id; req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns); ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum); + req->r_flags = flags | osdc->client->options->read_from_replica; req->r_snapid = vino.snap; if (flags & CEPH_OSD_FLAG_WRITE) @@ -2431,14 +2433,11 @@ static void __submit_request(struct ceph_osd_request *req, bool wrlocked) static void account_request(struct ceph_osd_request *req) { - struct ceph_osd_client *osdc = req->r_osdc; - WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK)); WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE))); req->r_flags |= CEPH_OSD_FLAG_ONDISK; - req->r_flags |= osdc->client->options->osd_req_flags; - atomic_inc(&osdc->num_requests); + atomic_inc(&req->r_osdc->num_requests); req->r_start_stamp = jiffies; req->r_start_latency = ktime_get(); diff --git a/net/core/dev.c b/net/core/dev.c index 6bc2388141f6..90b59fc50dc9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4192,10 +4192,12 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) local_bh_disable(); + dev_xmit_recursion_inc(); HARD_TX_LOCK(dev, txq, smp_processor_id()); if (!netif_xmit_frozen_or_drv_stopped(txq)) ret = netdev_start_xmit(skb, dev, txq, false); HARD_TX_UNLOCK(dev, txq); + dev_xmit_recursion_dec(); local_bh_enable(); @@ -9547,6 +9549,13 @@ int register_netdevice(struct net_device *dev) rcu_barrier(); dev->reg_state = NETREG_UNREGISTERED; + /* We should put the kobject that hold in + * netdev_unregister_kobject(), otherwise + * the net device cannot be freed when + * driver calls free_netdev(), because the + * kobject is being hold. 
+ */ + kobject_put(&dev->dev.kobj); } /* * Prevent userspace races by waiting until the network diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 2ee7bc4c9e03..b09bebeadf0b 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -1721,3 +1721,4 @@ module_exit(exit_net_drop_monitor); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Neil Horman "); MODULE_ALIAS_GENL_FAMILY("NET_DM"); +MODULE_DESCRIPTION("Monitoring code for network dropped packet alerts"); diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index 0cfc35e6be28..b739cfab796e 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -372,14 +372,15 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv) } EXPORT_SYMBOL(flow_indr_dev_register); -static void __flow_block_indr_cleanup(flow_setup_cb_t *setup_cb, void *cb_priv, +static void __flow_block_indr_cleanup(void (*release)(void *cb_priv), + void *cb_priv, struct list_head *cleanup_list) { struct flow_block_cb *this, *next; list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) { - if (this->cb == setup_cb && - this->cb_priv == cb_priv) { + if (this->release == release && + this->indr.cb_priv == cb_priv) { list_move(&this->indr.list, cleanup_list); return; } @@ -397,7 +398,7 @@ static void flow_block_indr_notify(struct list_head *cleanup_list) } void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, - flow_setup_cb_t *setup_cb) + void (*release)(void *cb_priv)) { struct flow_indr_dev *this, *next, *indr_dev = NULL; LIST_HEAD(cleanup_list); @@ -418,7 +419,7 @@ void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, return; } - __flow_block_indr_cleanup(setup_cb, cb_priv, &cleanup_list); + __flow_block_indr_cleanup(release, cb_priv, &cleanup_list); mutex_unlock(&flow_indr_block_lock); flow_block_indr_notify(&cleanup_list); @@ -429,32 +430,37 @@ EXPORT_SYMBOL(flow_indr_dev_unregister); static void flow_block_indr_init(struct flow_block_cb *flow_block, struct flow_block_offload *bo, struct net_device *dev, void *data, + void *cb_priv, void (*cleanup)(struct flow_block_cb *block_cb)) { flow_block->indr.binder_type = bo->binder_type; flow_block->indr.data = data; + flow_block->indr.cb_priv = cb_priv; flow_block->indr.dev = dev; flow_block->indr.cleanup = cleanup; } -static void __flow_block_indr_binding(struct flow_block_offload *bo, - struct net_device *dev, void *data, - void (*cleanup)(struct flow_block_cb *block_cb)) +struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb, + void *cb_ident, void *cb_priv, + void (*release)(void *cb_priv), + struct flow_block_offload *bo, + struct net_device *dev, void *data, + void *indr_cb_priv, + void (*cleanup)(struct flow_block_cb *block_cb)) { struct flow_block_cb *block_cb; - list_for_each_entry(block_cb, &bo->cb_list, list) { - switch (bo->command) { - case FLOW_BLOCK_BIND: - flow_block_indr_init(block_cb, bo, dev, data, cleanup); - list_add(&block_cb->indr.list, &flow_block_indr_list); - break; - case FLOW_BLOCK_UNBIND: - list_del(&block_cb->indr.list); - break; - } - } + block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release); + if (IS_ERR(block_cb)) + goto out; + + flow_block_indr_init(block_cb, bo, dev, data, indr_cb_priv, cleanup); + list_add(&block_cb->indr.list, &flow_block_indr_list); + +out: + return block_cb; } +EXPORT_SYMBOL(flow_indr_block_cb_alloc); int flow_indr_dev_setup_offload(struct net_device *dev, enum tc_setup_type type, void *data, @@ -465,9 +471,8 @@ int 
flow_indr_dev_setup_offload(struct net_device *dev, mutex_lock(&flow_indr_block_lock); list_for_each_entry(this, &flow_block_indr_dev_list, list) - this->cb(dev, this->cb_priv, type, bo); + this->cb(dev, this->cb_priv, type, bo, data, cleanup); - __flow_block_indr_binding(bo, dev, data, cleanup); mutex_unlock(&flow_indr_block_lock); return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0; diff --git a/net/core/sock.c b/net/core/sock.c index 6c4acf1f0220..d832c650287c 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -718,7 +718,7 @@ bool sk_mc_loop(struct sock *sk) return inet6_sk(sk)->mc_loop; #endif } - WARN_ON(1); + WARN_ON_ONCE(1); return true; } EXPORT_SYMBOL(sk_mc_loop); @@ -1767,6 +1767,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, cgroup_sk_alloc(&sk->sk_cgrp_data); sock_update_classid(&sk->sk_cgrp_data); sock_update_netprioidx(&sk->sk_cgrp_data); + sk_tx_queue_clear(sk); } return sk; @@ -1990,6 +1991,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) */ sk_refcnt_debug_inc(newsk); sk_set_socket(newsk, NULL); + sk_tx_queue_clear(newsk); RCU_INIT_POINTER(newsk->sk_wq, NULL); if (newsk->sk_prot->sockets_allocated) diff --git a/net/core/xdp.c b/net/core/xdp.c index 90f44f382115..3c45f99e26d5 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -462,6 +462,7 @@ struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp) xdpf->len = totsize - metasize; xdpf->headroom = 0; xdpf->metasize = metasize; + xdpf->frame_sz = PAGE_SIZE; xdpf->mem.type = MEM_TYPE_PAGE_ORDER0; xsk_buff_free(xdp); diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index e8eaa804ccb9..d6200ff98200 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -13,6 +13,16 @@ #define DSA_HLEN 4 #define EDSA_HLEN 8 +#define FRAME_TYPE_TO_CPU 0x00 +#define FRAME_TYPE_FORWARD 0x03 + +#define TO_CPU_CODE_MGMT_TRAP 0x00 +#define TO_CPU_CODE_FRAME2REG 0x01 +#define TO_CPU_CODE_IGMP_MLD_TRAP 0x02 +#define TO_CPU_CODE_POLICY_TRAP 0x03 +#define TO_CPU_CODE_ARP_MIRROR 0x04 +#define TO_CPU_CODE_POLICY_MIRROR 0x05 + static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_port *dp = dsa_slave_to_port(dev); @@ -77,6 +87,8 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { u8 *edsa_header; + int frame_type; + int code; int source_device; int source_port; @@ -91,8 +103,29 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, /* * Check that frame type is either TO_CPU or FORWARD. */ - if ((edsa_header[0] & 0xc0) != 0x00 && (edsa_header[0] & 0xc0) != 0xc0) + frame_type = edsa_header[0] >> 6; + + switch (frame_type) { + case FRAME_TYPE_TO_CPU: + code = (edsa_header[1] & 0x6) | ((edsa_header[2] >> 4) & 1); + + /* + * Mark the frame to never egress on any port of the same switch + * unless it's a trapped IGMP/MLD packet, in which case the + * bridge might want to forward it. + */ + if (code != TO_CPU_CODE_IGMP_MLD_TRAP) + skb->offload_fwd_mark = 1; + + break; + + case FRAME_TYPE_FORWARD: + skb->offload_fwd_mark = 1; + break; + + default: return NULL; + } /* * Determine source device and port. 
@@ -156,8 +189,6 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, 2 * ETH_ALEN); } - skb->offload_fwd_mark = 1; - return skb; } diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c index 7b7a0456c15c..7194956aa09e 100644 --- a/net/ethtool/cabletest.c +++ b/net/ethtool/cabletest.c @@ -234,6 +234,14 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest, struct nlattr *tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX + 1]; int ret; + cfg->first = 100; + cfg->step = 100; + cfg->last = MAX_CABLE_LENGTH_CM; + cfg->pair = PHY_PAIR_ALL; + + if (!nest) + return 0; + ret = nla_parse_nested(tb, ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX, nest, cable_test_tdr_act_cfg_policy, info->extack); if (ret < 0) @@ -242,17 +250,12 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest, if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST]) cfg->first = nla_get_u32( tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST]); - else - cfg->first = 100; + if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST]) cfg->last = nla_get_u32(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST]); - else - cfg->last = MAX_CABLE_LENGTH_CM; if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP]) cfg->step = nla_get_u32(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP]); - else - cfg->step = 100; if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR]) { cfg->pair = nla_get_u8(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR]); @@ -263,8 +266,6 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest, "invalid pair parameter"); return -EINVAL; } - } else { - cfg->pair = PHY_PAIR_ALL; } if (cfg->first > MAX_CABLE_LENGTH_CM) { diff --git a/net/ethtool/common.c b/net/ethtool/common.c index 423e640e3876..aaecfc916a4d 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -40,9 +40,11 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = { [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation", [NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial", + [NETIF_F_GSO_TUNNEL_REMCSUM_BIT] = "tx-tunnel-remcsum-segmentation", [NETIF_F_GSO_SCTP_BIT] = "tx-sctp-segmentation", [NETIF_F_GSO_ESP_BIT] = "tx-esp-segmentation", [NETIF_F_GSO_UDP_L4_BIT] = "tx-udp-segmentation", + [NETIF_F_GSO_FRAGLIST_BIT] = "tx-gso-list", [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", [NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp", diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index b5df90c981c2..21d5fc0f6bb3 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -2978,7 +2978,7 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input) sizeof(match->mask.ipv6.dst)); } if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) || - memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) { + memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) { match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS); match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c index 7f47ba89054e..afe5ac8a0f00 100644 --- a/net/ethtool/linkstate.c +++ b/net/ethtool/linkstate.c @@ -78,19 +78,18 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base, ret = linkstate_get_sqi(dev); if (ret < 0 && ret != -EOPNOTSUPP) - return ret; - + goto out; data->sqi = ret; ret = linkstate_get_sqi_max(dev); if (ret < 0 && ret != -EOPNOTSUPP) - return ret; - + goto out; data->sqi_max = ret; + ret = 0; +out: ethnl_ops_complete(dev); - - return 0; + return ret; } static int linkstate_reply_size(const struct ethnl_req_info 
*req_base, diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index cd99f548e440..478852ef98ef 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -339,7 +339,7 @@ static void hsr_announce(struct timer_list *t) rcu_read_unlock(); } -static void hsr_del_ports(struct hsr_priv *hsr) +void hsr_del_ports(struct hsr_priv *hsr) { struct hsr_port *port; @@ -356,31 +356,12 @@ static void hsr_del_ports(struct hsr_priv *hsr) hsr_del_port(port); } -/* This has to be called after all the readers are gone. - * Otherwise we would have to check the return value of - * hsr_port_get_hsr(). - */ -static void hsr_dev_destroy(struct net_device *hsr_dev) -{ - struct hsr_priv *hsr = netdev_priv(hsr_dev); - - hsr_debugfs_term(hsr); - hsr_del_ports(hsr); - - del_timer_sync(&hsr->prune_timer); - del_timer_sync(&hsr->announce_timer); - - hsr_del_self_node(hsr); - hsr_del_nodes(&hsr->node_db); -} - static const struct net_device_ops hsr_device_ops = { .ndo_change_mtu = hsr_dev_change_mtu, .ndo_open = hsr_dev_open, .ndo_stop = hsr_dev_close, .ndo_start_xmit = hsr_dev_xmit, .ndo_fix_features = hsr_fix_features, - .ndo_uninit = hsr_dev_destroy, }; static struct device_type hsr_type = { diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h index a099d7de7e79..b8f9262ed101 100644 --- a/net/hsr/hsr_device.h +++ b/net/hsr/hsr_device.h @@ -11,6 +11,7 @@ #include #include "hsr_main.h" +void hsr_del_ports(struct hsr_priv *hsr); void hsr_dev_setup(struct net_device *dev); int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], unsigned char multicast_spec, u8 protocol_version, @@ -18,5 +19,4 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], void hsr_check_carrier_and_operstate(struct hsr_priv *hsr); bool is_hsr_master(struct net_device *dev); int hsr_get_max_mtu(struct hsr_priv *hsr); - #endif /* __HSR_DEVICE_H */ diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c index e2564de67603..144da15f0a81 100644 --- a/net/hsr/hsr_main.c +++ b/net/hsr/hsr_main.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include @@ -100,8 +101,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER); hsr_del_port(port); if (hsr_slave_empty(master->hsr)) { - unregister_netdevice_queue(master->dev, - &list_kill); + const struct rtnl_link_ops *ops; + + ops = master->dev->rtnl_link_ops; + ops->dellink(master->dev, &list_kill); unregister_netdevice_many(&list_kill); } } @@ -144,9 +147,9 @@ static int __init hsr_init(void) static void __exit hsr_exit(void) { - unregister_netdevice_notifier(&hsr_nb); hsr_netlink_exit(); hsr_debugfs_remove_root(); + unregister_netdevice_notifier(&hsr_nb); } module_init(hsr_init); diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 1decb25f6764..6e14b7d22639 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -83,6 +83,22 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack); } +static void hsr_dellink(struct net_device *dev, struct list_head *head) +{ + struct hsr_priv *hsr = netdev_priv(dev); + + del_timer_sync(&hsr->prune_timer); + del_timer_sync(&hsr->announce_timer); + + hsr_debugfs_term(hsr); + hsr_del_ports(hsr); + + hsr_del_self_node(hsr); + hsr_del_nodes(&hsr->node_db); + + unregister_netdevice_queue(dev, head); +} + static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct hsr_priv *hsr = netdev_priv(dev); @@ 
-118,6 +134,7 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = { .priv_size = sizeof(struct hsr_priv), .setup = hsr_dev_setup, .newlink = hsr_newlink, + .dellink = hsr_dellink, .fill_info = hsr_fill_info, }; diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 6ecbb0ced177..e64e59b536d3 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -340,29 +340,31 @@ config NET_FOU_IP_TUNNELS config INET_AH tristate "IP: AH transformation" - select XFRM_ALGO - select CRYPTO - select CRYPTO_HMAC - select CRYPTO_MD5 - select CRYPTO_SHA1 + select XFRM_AH help - Support for IPsec AH. + Support for IPsec AH (Authentication Header). + + AH can be used with various authentication algorithms. Besides + enabling AH support itself, this option enables the generic + implementations of the algorithms that RFC 8221 lists as MUST be + implemented. If you need any other algorithms, you'll need to enable + them in the crypto API. You should also enable accelerated + implementations of any needed algorithms when available. If unsure, say Y. config INET_ESP tristate "IP: ESP transformation" - select XFRM_ALGO - select CRYPTO - select CRYPTO_AUTHENC - select CRYPTO_HMAC - select CRYPTO_MD5 - select CRYPTO_CBC - select CRYPTO_SHA1 - select CRYPTO_DES - select CRYPTO_ECHAINIV + select XFRM_ESP help - Support for IPsec ESP. + Support for IPsec ESP (Encapsulating Security Payload). + + ESP can be used with various encryption and authentication algorithms. + Besides enabling ESP support itself, this option enables the generic + implementations of the algorithms that RFC 8221 lists as MUST be + implemented. If you need any other algorithms, you'll need to enable + them in the crypto API. You should also enable accelerated + implementations of any needed algorithms when available. If unsure, say Y. 
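The two hunks above only convert the IPv4-side INET_AH and INET_ESP options into thin wrappers; the XFRM_AH and XFRM_ESP umbrella symbols they now select are defined in net/xfrm/Kconfig and do not appear in this excerpt. As a rough sketch of what such an umbrella option looks like — the algorithm selections below are chosen to match the RFC 8221 MUST-implement set rather than copied from the tree:

# Illustrative sketch only: the real definition lives in net/xfrm/Kconfig
# and is not part of this diff. Selections follow the RFC 8221 MUST set
# (NULL and AES-CBC encryption, AES-GCM AEAD, HMAC-SHA256 authentication).
config XFRM_ESP
	tristate
	select CRYPTO
	select CRYPTO_AES
	select CRYPTO_AUTHENC
	select CRYPTO_CBC
	select CRYPTO_ECHAINIV
	select CRYPTO_GCM
	select CRYPTO_HMAC
	select CRYPTO_SEQIV
	select CRYPTO_SHA256
	select XFRM_ALGO

With a hidden symbol of this shape, the user-visible option reduces to the `tristate "IP: ESP transformation"` plus `select XFRM_ESP` seen in the hunk above, and any algorithms beyond the MUST set (for example ChaCha20-Poly1305) still have to be enabled separately in the crypto API, as the new help text notes.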
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index d14133eac476..5bda5aeda579 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -361,3 +361,4 @@ module_exit(esp4_offload_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steffen Klassert "); MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP); +MODULE_DESCRIPTION("IPV4 GSO/GRO offload support"); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index e53871e4a097..1f75dc686b6b 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1109,7 +1109,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table, if (fl4.flowi4_scope < RT_SCOPE_LINK) fl4.flowi4_scope = RT_SCOPE_LINK; - if (table) + if (table && table != RT_TABLE_MAIN) tbl = fib_get_table(net, table); if (tbl) diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index dcc79ff54b41..abd083415f89 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -1304,3 +1304,4 @@ module_init(fou_init); module_exit(fou_fini); MODULE_AUTHOR("Tom Herbert "); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Foo over UDP"); diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index f4f1d11eab50..0c1f36404471 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -85,9 +85,10 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, __be32 remote, __be32 local, __be32 key) { - unsigned int hash; struct ip_tunnel *t, *cand = NULL; struct hlist_head *head; + struct net_device *ndev; + unsigned int hash; hash = ip_tunnel_hash(key, remote); head = &itn->tunnels[hash]; @@ -162,8 +163,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, if (t && t->dev->flags & IFF_UP) return t; - if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP) - return netdev_priv(itn->fb_tunnel_dev); + ndev = READ_ONCE(itn->fb_tunnel_dev); + if (ndev && ndev->flags & IFF_UP) + return netdev_priv(ndev); return NULL; } @@ -1259,9 +1261,9 @@ void ip_tunnel_uninit(struct net_device *dev) struct ip_tunnel_net *itn; itn = net_generic(net, tunnel->ip_tnl_net_id); - /* fb_tunnel_dev will be unregisted in net-exit call. 
*/ - if (itn->fb_tunnel_dev != dev) - ip_tunnel_del(itn, netdev_priv(dev)); + ip_tunnel_del(itn, netdev_priv(dev)); + if (itn->fb_tunnel_dev == dev) + WRITE_ONCE(itn->fb_tunnel_dev, NULL); dst_cache_reset(&tunnel->dst_cache); } diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index c2670eaa74e6..5bf9fa06aee0 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1797,11 +1797,22 @@ int ipt_register_table(struct net *net, const struct xt_table *table, return ret; } +void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops) +{ + nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); +} + +void ipt_unregister_table_exit(struct net *net, struct xt_table *table) +{ + __ipt_unregister_table(net, table); +} + void ipt_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops) { if (ops) - nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); + ipt_unregister_table_pre_exit(net, table, ops); __ipt_unregister_table(net, table); } @@ -1958,6 +1969,8 @@ static void __exit ip_tables_fini(void) EXPORT_SYMBOL(ipt_register_table); EXPORT_SYMBOL(ipt_unregister_table); +EXPORT_SYMBOL(ipt_unregister_table_pre_exit); +EXPORT_SYMBOL(ipt_unregister_table_exit); EXPORT_SYMBOL(ipt_do_table); module_init(ip_tables_init); module_exit(ip_tables_fini); diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index 748dc3ce58d3..f2984c7eef40 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c @@ -118,3 +118,4 @@ module_exit(synproxy_tg4_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy "); +MODULE_DESCRIPTION("Intercept TCP connections and establish them using syncookies"); diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c index 9d54b4017e50..8f7bc1ee7453 100644 --- a/net/ipv4/netfilter/iptable_filter.c +++ b/net/ipv4/netfilter/iptable_filter.c @@ -72,16 +72,24 @@ static int __net_init iptable_filter_net_init(struct net *net) return 0; } +static void __net_exit iptable_filter_net_pre_exit(struct net *net) +{ + if (net->ipv4.iptable_filter) + ipt_unregister_table_pre_exit(net, net->ipv4.iptable_filter, + filter_ops); +} + static void __net_exit iptable_filter_net_exit(struct net *net) { if (!net->ipv4.iptable_filter) return; - ipt_unregister_table(net, net->ipv4.iptable_filter, filter_ops); + ipt_unregister_table_exit(net, net->ipv4.iptable_filter); net->ipv4.iptable_filter = NULL; } static struct pernet_operations iptable_filter_net_ops = { .init = iptable_filter_net_init, + .pre_exit = iptable_filter_net_pre_exit, .exit = iptable_filter_net_exit, }; diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index bb9266ea3785..f703a717ab1d 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c @@ -100,15 +100,23 @@ static int __net_init iptable_mangle_table_init(struct net *net) return ret; } +static void __net_exit iptable_mangle_net_pre_exit(struct net *net) +{ + if (net->ipv4.iptable_mangle) + ipt_unregister_table_pre_exit(net, net->ipv4.iptable_mangle, + mangle_ops); +} + static void __net_exit iptable_mangle_net_exit(struct net *net) { if (!net->ipv4.iptable_mangle) return; - ipt_unregister_table(net, net->ipv4.iptable_mangle, mangle_ops); + ipt_unregister_table_exit(net, net->ipv4.iptable_mangle); net->ipv4.iptable_mangle = NULL; } static struct pernet_operations 
iptable_mangle_net_ops = { + .pre_exit = iptable_mangle_net_pre_exit, .exit = iptable_mangle_net_exit, }; diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c index ad33687b7444..b0143b109f25 100644 --- a/net/ipv4/netfilter/iptable_nat.c +++ b/net/ipv4/netfilter/iptable_nat.c @@ -113,16 +113,22 @@ static int __net_init iptable_nat_table_init(struct net *net) return ret; } +static void __net_exit iptable_nat_net_pre_exit(struct net *net) +{ + if (net->ipv4.nat_table) + ipt_nat_unregister_lookups(net); +} + static void __net_exit iptable_nat_net_exit(struct net *net) { if (!net->ipv4.nat_table) return; - ipt_nat_unregister_lookups(net); - ipt_unregister_table(net, net->ipv4.nat_table, NULL); + ipt_unregister_table_exit(net, net->ipv4.nat_table); net->ipv4.nat_table = NULL; } static struct pernet_operations iptable_nat_net_ops = { + .pre_exit = iptable_nat_net_pre_exit, .exit = iptable_nat_net_exit, }; diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c index 69697eb4bfc6..9abfe6bf2cb9 100644 --- a/net/ipv4/netfilter/iptable_raw.c +++ b/net/ipv4/netfilter/iptable_raw.c @@ -67,15 +67,23 @@ static int __net_init iptable_raw_table_init(struct net *net) return ret; } +static void __net_exit iptable_raw_net_pre_exit(struct net *net) +{ + if (net->ipv4.iptable_raw) + ipt_unregister_table_pre_exit(net, net->ipv4.iptable_raw, + rawtable_ops); +} + static void __net_exit iptable_raw_net_exit(struct net *net) { if (!net->ipv4.iptable_raw) return; - ipt_unregister_table(net, net->ipv4.iptable_raw, rawtable_ops); + ipt_unregister_table_exit(net, net->ipv4.iptable_raw); net->ipv4.iptable_raw = NULL; } static struct pernet_operations iptable_raw_net_ops = { + .pre_exit = iptable_raw_net_pre_exit, .exit = iptable_raw_net_exit, }; diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c index ac633c1db97e..415c1975d770 100644 --- a/net/ipv4/netfilter/iptable_security.c +++ b/net/ipv4/netfilter/iptable_security.c @@ -62,16 +62,23 @@ static int __net_init iptable_security_table_init(struct net *net) return ret; } +static void __net_exit iptable_security_net_pre_exit(struct net *net) +{ + if (net->ipv4.iptable_security) + ipt_unregister_table_pre_exit(net, net->ipv4.iptable_security, + sectbl_ops); +} + static void __net_exit iptable_security_net_exit(struct net *net) { if (!net->ipv4.iptable_security) return; - - ipt_unregister_table(net, net->ipv4.iptable_security, sectbl_ops); + ipt_unregister_table_exit(net, net->ipv4.iptable_security); net->ipv4.iptable_security = NULL; } static struct pernet_operations iptable_security_net_ops = { + .pre_exit = iptable_security_net_pre_exit, .exit = iptable_security_net_exit, }; diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c index e32e41b99f0f..aba65fe90345 100644 --- a/net/ipv4/netfilter/nf_flow_table_ipv4.c +++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c @@ -34,3 +34,4 @@ module_exit(nf_flow_ipv4_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); MODULE_ALIAS_NF_FLOWTABLE(AF_INET); +MODULE_DESCRIPTION("Netfilter flow table support"); diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c index abf89b972094..bcdb37f86a94 100644 --- a/net/ipv4/netfilter/nft_dup_ipv4.c +++ b/net/ipv4/netfilter/nft_dup_ipv4.c @@ -107,3 +107,4 @@ module_exit(nft_dup_ipv4_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "dup"); 
+MODULE_DESCRIPTION("IPv4 nftables packet duplication support"); diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c index ce294113dbcd..03df986217b7 100644 --- a/net/ipv4/netfilter/nft_fib_ipv4.c +++ b/net/ipv4/netfilter/nft_fib_ipv4.c @@ -210,3 +210,4 @@ module_exit(nft_fib4_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Florian Westphal "); MODULE_ALIAS_NFT_AF_EXPR(2, "fib"); +MODULE_DESCRIPTION("nftables fib / ip route lookup support"); diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c index 7e6fd5cde50f..e408f813f5d8 100644 --- a/net/ipv4/netfilter/nft_reject_ipv4.c +++ b/net/ipv4/netfilter/nft_reject_ipv4.c @@ -71,3 +71,4 @@ module_exit(nft_reject_ipv4_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy "); MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "reject"); +MODULE_DESCRIPTION("IPv4 packet rejection for nftables"); diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 8f8eefd3a3ce..c7bf5b26bf0c 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -432,10 +432,9 @@ static void hystart_update(struct sock *sk, u32 delay) if (hystart_detect & HYSTART_DELAY) { /* obtain the minimum delay of more than sampling packets */ + if (ca->curr_rtt > delay) + ca->curr_rtt = delay; if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { - if (ca->curr_rtt > delay) - ca->curr_rtt = delay; - ca->sample_cnt++; } else { if (ca->curr_rtt > ca->delay_min + diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 83330a6cb242..f3a0eb139b76 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -261,7 +261,8 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb) * cwnd may be very low (even just 1 packet), so we should ACK * immediately. */ - inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; + if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) + inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; } } @@ -3665,6 +3666,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_in_ack_event(sk, ack_ev_flags); } + /* This is a deviation from RFC3168 since it states that: + * "When the TCP data sender is ready to set the CWR bit after reducing + * the congestion window, it SHOULD set the CWR bit only on the first + * new data packet that it transmits." + * We accept CWR on pure ACKs to be more robust + * with widely-deployed TCP implementations that do this. + */ + tcp_ecn_accept_cwr(sk, skb); + /* We passed data and got it acked, remove any soft error * log. Something worked... */ @@ -4605,7 +4615,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) { coalesce_done: - tcp_grow_window(sk, skb); + /* For non sack flows, do not grow window to force DUPACK + * and trigger fast retransmit. + */ + if (tcp_is_sack(tp)) + tcp_grow_window(sk, skb); kfree_skb_partial(skb, fragstolen); skb = NULL; goto add_sack; @@ -4689,7 +4703,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) tcp_sack_new_ofo_skb(sk, seq, end_seq); end: if (skb) { - tcp_grow_window(sk, skb); + /* For non sack flows, do not grow window to force DUPACK + * and trigger fast retransmit. 
+ */ + if (tcp_is_sack(tp)) + tcp_grow_window(sk, skb); skb_condense(skb); skb_set_owner_r(skb, sk); } @@ -4792,8 +4810,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) skb_dst_drop(skb); __skb_pull(skb, tcp_hdr(skb)->doff * 4); - tcp_ecn_accept_cwr(sk, skb); - tp->rx_opt.dsack = 0; /* Queue data for delivery to the user. diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 992cf45fb4f6..f4f19e89af5e 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -49,29 +49,31 @@ config IPV6_OPTIMISTIC_DAD config INET6_AH tristate "IPv6: AH transformation" - select XFRM_ALGO - select CRYPTO - select CRYPTO_HMAC - select CRYPTO_MD5 - select CRYPTO_SHA1 + select XFRM_AH help - Support for IPsec AH. + Support for IPsec AH (Authentication Header). + + AH can be used with various authentication algorithms. Besides + enabling AH support itself, this option enables the generic + implementations of the algorithms that RFC 8221 lists as MUST be + implemented. If you need any other algorithms, you'll need to enable + them in the crypto API. You should also enable accelerated + implementations of any needed algorithms when available. If unsure, say Y. config INET6_ESP tristate "IPv6: ESP transformation" - select XFRM_ALGO - select CRYPTO - select CRYPTO_AUTHENC - select CRYPTO_HMAC - select CRYPTO_MD5 - select CRYPTO_CBC - select CRYPTO_SHA1 - select CRYPTO_DES - select CRYPTO_ECHAINIV + select XFRM_ESP help - Support for IPsec ESP. + Support for IPsec ESP (Encapsulating Security Payload). + + ESP can be used with various encryption and authentication algorithms. + Besides enabling ESP support itself, this option enables the generic + implementations of the algorithms that RFC 8221 lists as MUST be + implemented. If you need any other algorithms, you'll need to enable + them in the crypto API. You should also enable accelerated + implementations of any needed algorithms when available. If unsure, say Y. diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 55addea1948f..1ca516fb30e1 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -395,3 +395,4 @@ module_exit(esp6_offload_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steffen Klassert "); MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP); +MODULE_DESCRIPTION("IPV6 GSO/GRO offload support"); diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c index 091f94184dc1..430518ae26fa 100644 --- a/net/ipv6/fou6.c +++ b/net/ipv6/fou6.c @@ -224,3 +224,4 @@ module_init(fou6_init); module_exit(fou6_fini); MODULE_AUTHOR("Tom Herbert "); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Foo over UDP (IPv6)"); diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c index 257d2b681246..36c58aa257e8 100644 --- a/net/ipv6/ila/ila_main.c +++ b/net/ipv6/ila/ila_main.c @@ -120,3 +120,4 @@ module_init(ila_init); module_exit(ila_fini); MODULE_AUTHOR("Tom Herbert "); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("IPv6: Identifier Locator Addressing (ILA)"); diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 781ca8c07a0d..6532bde82b40 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -127,6 +127,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, gre_proto == htons(ETH_P_ERSPAN2)) ? 
ARPHRD_ETHER : ARPHRD_IP6GRE; int score, cand_score = 4; + struct net_device *ndev; for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) { if (!ipv6_addr_equal(local, &t->parms.laddr) || @@ -238,9 +239,9 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, if (t && t->dev->flags & IFF_UP) return t; - dev = ign->fb_tunnel_dev; - if (dev && dev->flags & IFF_UP) - return netdev_priv(dev); + ndev = READ_ONCE(ign->fb_tunnel_dev); + if (ndev && ndev->flags & IFF_UP) + return netdev_priv(ndev); return NULL; } @@ -413,6 +414,8 @@ static void ip6gre_tunnel_uninit(struct net_device *dev) ip6gre_tunnel_unlink_md(ign, t); ip6gre_tunnel_unlink(ign, t); + if (ign->fb_tunnel_dev == dev) + WRITE_ONCE(ign->fb_tunnel_dev, NULL); dst_cache_reset(&t->dst_cache); dev_put(dev); } diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 7e12d2114158..8cd2782a31e4 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -2615,6 +2615,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev) idev->mc_list = i->next; write_unlock_bh(&idev->lock); + ip6_mc_clear_src(i); ma_put(i); write_lock_bh(&idev->lock); } diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index e27393498ecb..e96a431549bc 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1807,11 +1807,22 @@ int ip6t_register_table(struct net *net, const struct xt_table *table, return ret; } +void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops) +{ + nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); +} + +void ip6t_unregister_table_exit(struct net *net, struct xt_table *table) +{ + __ip6t_unregister_table(net, table); +} + void ip6t_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops) { if (ops) - nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); + ip6t_unregister_table_pre_exit(net, table, ops); __ip6t_unregister_table(net, table); } @@ -1969,6 +1980,8 @@ static void __exit ip6_tables_fini(void) EXPORT_SYMBOL(ip6t_register_table); EXPORT_SYMBOL(ip6t_unregister_table); +EXPORT_SYMBOL(ip6t_unregister_table_pre_exit); +EXPORT_SYMBOL(ip6t_unregister_table_exit); EXPORT_SYMBOL(ip6t_do_table); module_init(ip6_tables_init); diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index fd1f52a21bf1..d51d0c3e5fe9 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -121,3 +121,4 @@ module_exit(synproxy_tg6_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy "); +MODULE_DESCRIPTION("Intercept IPv6 TCP connections and establish them using syncookies"); diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c index 32667f5d5a33..88337b51ffbf 100644 --- a/net/ipv6/netfilter/ip6table_filter.c +++ b/net/ipv6/netfilter/ip6table_filter.c @@ -73,16 +73,24 @@ static int __net_init ip6table_filter_net_init(struct net *net) return 0; } +static void __net_exit ip6table_filter_net_pre_exit(struct net *net) +{ + if (net->ipv6.ip6table_filter) + ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_filter, + filter_ops); +} + static void __net_exit ip6table_filter_net_exit(struct net *net) { if (!net->ipv6.ip6table_filter) return; - ip6t_unregister_table(net, net->ipv6.ip6table_filter, filter_ops); + ip6t_unregister_table_exit(net, net->ipv6.ip6table_filter); net->ipv6.ip6table_filter = NULL; } static struct pernet_operations ip6table_filter_net_ops = { .init = 
ip6table_filter_net_init, + .pre_exit = ip6table_filter_net_pre_exit, .exit = ip6table_filter_net_exit, }; diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index 070afb97fa2b..1a2748611e00 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c @@ -93,16 +93,24 @@ static int __net_init ip6table_mangle_table_init(struct net *net) return ret; } +static void __net_exit ip6table_mangle_net_pre_exit(struct net *net) +{ + if (net->ipv6.ip6table_mangle) + ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_mangle, + mangle_ops); +} + static void __net_exit ip6table_mangle_net_exit(struct net *net) { if (!net->ipv6.ip6table_mangle) return; - ip6t_unregister_table(net, net->ipv6.ip6table_mangle, mangle_ops); + ip6t_unregister_table_exit(net, net->ipv6.ip6table_mangle); net->ipv6.ip6table_mangle = NULL; } static struct pernet_operations ip6table_mangle_net_ops = { + .pre_exit = ip6table_mangle_net_pre_exit, .exit = ip6table_mangle_net_exit, }; diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c index 0f4875952efc..0a23265e3caa 100644 --- a/net/ipv6/netfilter/ip6table_nat.c +++ b/net/ipv6/netfilter/ip6table_nat.c @@ -114,16 +114,22 @@ static int __net_init ip6table_nat_table_init(struct net *net) return ret; } +static void __net_exit ip6table_nat_net_pre_exit(struct net *net) +{ + if (net->ipv6.ip6table_nat) + ip6t_nat_unregister_lookups(net); +} + static void __net_exit ip6table_nat_net_exit(struct net *net) { if (!net->ipv6.ip6table_nat) return; - ip6t_nat_unregister_lookups(net); - ip6t_unregister_table(net, net->ipv6.ip6table_nat, NULL); + ip6t_unregister_table_exit(net, net->ipv6.ip6table_nat); net->ipv6.ip6table_nat = NULL; } static struct pernet_operations ip6table_nat_net_ops = { + .pre_exit = ip6table_nat_net_pre_exit, .exit = ip6table_nat_net_exit, }; diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c index a22100b1cf2c..8f9e742226f7 100644 --- a/net/ipv6/netfilter/ip6table_raw.c +++ b/net/ipv6/netfilter/ip6table_raw.c @@ -66,15 +66,23 @@ static int __net_init ip6table_raw_table_init(struct net *net) return ret; } +static void __net_exit ip6table_raw_net_pre_exit(struct net *net) +{ + if (net->ipv6.ip6table_raw) + ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_raw, + rawtable_ops); +} + static void __net_exit ip6table_raw_net_exit(struct net *net) { if (!net->ipv6.ip6table_raw) return; - ip6t_unregister_table(net, net->ipv6.ip6table_raw, rawtable_ops); + ip6t_unregister_table_exit(net, net->ipv6.ip6table_raw); net->ipv6.ip6table_raw = NULL; } static struct pernet_operations ip6table_raw_net_ops = { + .pre_exit = ip6table_raw_net_pre_exit, .exit = ip6table_raw_net_exit, }; diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c index a74335fe2bd9..5e8c48fed032 100644 --- a/net/ipv6/netfilter/ip6table_security.c +++ b/net/ipv6/netfilter/ip6table_security.c @@ -61,15 +61,23 @@ static int __net_init ip6table_security_table_init(struct net *net) return ret; } +static void __net_exit ip6table_security_net_pre_exit(struct net *net) +{ + if (net->ipv6.ip6table_security) + ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_security, + sectbl_ops); +} + static void __net_exit ip6table_security_net_exit(struct net *net) { if (!net->ipv6.ip6table_security) return; - ip6t_unregister_table(net, net->ipv6.ip6table_security, sectbl_ops); + ip6t_unregister_table_exit(net, net->ipv6.ip6table_security); 
net->ipv6.ip6table_security = NULL; } static struct pernet_operations ip6table_security_net_ops = { + .pre_exit = ip6table_security_net_pre_exit, .exit = ip6table_security_net_exit, }; diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c index a8566ee12e83..667b8af2546a 100644 --- a/net/ipv6/netfilter/nf_flow_table_ipv6.c +++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c @@ -35,3 +35,4 @@ module_exit(nf_flow_ipv6_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); MODULE_ALIAS_NF_FLOWTABLE(AF_INET6); +MODULE_DESCRIPTION("Netfilter flow table IPv6 module"); diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c index 2af32200507d..8b5193efb1f1 100644 --- a/net/ipv6/netfilter/nft_dup_ipv6.c +++ b/net/ipv6/netfilter/nft_dup_ipv6.c @@ -105,3 +105,4 @@ module_exit(nft_dup_ipv6_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "dup"); +MODULE_DESCRIPTION("IPv6 nftables packet duplication support"); diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c index 7ece86afd079..e204163c7036 100644 --- a/net/ipv6/netfilter/nft_fib_ipv6.c +++ b/net/ipv6/netfilter/nft_fib_ipv6.c @@ -255,3 +255,4 @@ module_exit(nft_fib6_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Florian Westphal "); MODULE_ALIAS_NFT_AF_EXPR(10, "fib"); +MODULE_DESCRIPTION("nftables fib / ipv6 route lookup support"); diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c index 680a28ce29fd..c1098a1968e1 100644 --- a/net/ipv6/netfilter/nft_reject_ipv6.c +++ b/net/ipv6/netfilter/nft_reject_ipv6.c @@ -72,3 +72,4 @@ module_exit(nft_reject_ipv6_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy "); MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "reject"); +MODULE_DESCRIPTION("IPv6 packet rejection for nftables"); diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 490b92534afc..df9a51425c6f 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -336,9 +336,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb, */ subflow->snd_isn = TCP_SKB_CB(skb)->end_seq; if (subflow->request_mptcp) { - pr_debug("local_key=%llu", subflow->local_key); opts->suboptions = OPTION_MPTCP_MPC_SYN; - opts->sndr_key = subflow->local_key; *size = TCPOLEN_MPTCP_MPC_SYN; return true; } else if (subflow->request_join) { diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 809687d3f410..c6eeaf3e8dcb 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -135,8 +135,6 @@ static inline __be32 mptcp_option(u8 subopt, u8 len, u8 nib, u8 field) ((nib & 0xF) << 8) | field); } -#define MPTCP_PM_MAX_ADDR 4 - struct mptcp_addr_info { sa_family_t family; __be16 port; @@ -234,10 +232,7 @@ static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); - if (list_empty(&msk->rtx_queue)) - return NULL; - - return list_first_entry(&msk->rtx_queue, struct mptcp_data_frag, list); + return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list); } struct mptcp_subflow_request_sock { @@ -254,6 +249,7 @@ struct mptcp_subflow_request_sock { u64 thmac; u32 local_nonce; u32 remote_nonce; + struct mptcp_sock *msk; }; static inline struct mptcp_subflow_request_sock * diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index bf132575040d..3838a0b3a21f 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -69,6 +69,9 @@ static void 
subflow_req_destructor(struct request_sock *req) pr_debug("subflow_req=%p", subflow_req); + if (subflow_req->msk) + sock_put((struct sock *)subflow_req->msk); + if (subflow_req->mp_capable) mptcp_token_destroy_request(subflow_req->token); tcp_request_sock_ops.destructor(req); @@ -86,8 +89,8 @@ static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2, } /* validate received token and create truncated hmac and nonce for SYN-ACK */ -static bool subflow_token_join_request(struct request_sock *req, - const struct sk_buff *skb) +static struct mptcp_sock *subflow_token_join_request(struct request_sock *req, + const struct sk_buff *skb) { struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); u8 hmac[SHA256_DIGEST_SIZE]; @@ -97,13 +100,13 @@ static bool subflow_token_join_request(struct request_sock *req, msk = mptcp_token_get_sock(subflow_req->token); if (!msk) { SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN); - return false; + return NULL; } local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req); if (local_id < 0) { sock_put((struct sock *)msk); - return false; + return NULL; } subflow_req->local_id = local_id; @@ -114,9 +117,7 @@ static bool subflow_token_join_request(struct request_sock *req, subflow_req->remote_nonce, hmac); subflow_req->thmac = get_unaligned_be64(hmac); - - sock_put((struct sock *)msk); - return true; + return msk; } static void subflow_init_req(struct request_sock *req, @@ -133,6 +134,7 @@ static void subflow_init_req(struct request_sock *req, subflow_req->mp_capable = 0; subflow_req->mp_join = 0; + subflow_req->msk = NULL; #ifdef CONFIG_TCP_MD5SIG /* no MPTCP if MD5SIG is enabled on this socket or we may run out of @@ -166,12 +168,9 @@ static void subflow_init_req(struct request_sock *req, subflow_req->remote_id = mp_opt.join_id; subflow_req->token = mp_opt.token; subflow_req->remote_nonce = mp_opt.nonce; - pr_debug("token=%u, remote_nonce=%u", subflow_req->token, - subflow_req->remote_nonce); - if (!subflow_token_join_request(req, skb)) { - subflow_req->mp_join = 0; - // @@ need to trigger RST - } + subflow_req->msk = subflow_token_join_request(req, skb); + pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token, + subflow_req->remote_nonce, subflow_req->msk); } } @@ -354,10 +353,9 @@ static bool subflow_hmac_valid(const struct request_sock *req, const struct mptcp_subflow_request_sock *subflow_req; u8 hmac[SHA256_DIGEST_SIZE]; struct mptcp_sock *msk; - bool ret; subflow_req = mptcp_subflow_rsk(req); - msk = mptcp_token_get_sock(subflow_req->token); + msk = subflow_req->msk; if (!msk) return false; @@ -365,12 +363,7 @@ static bool subflow_hmac_valid(const struct request_sock *req, subflow_req->remote_nonce, subflow_req->local_nonce, hmac); - ret = true; - if (crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN)) - ret = false; - - sock_put((struct sock *)msk); - return ret; + return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN); } static void mptcp_sock_destruct(struct sock *sk) @@ -438,22 +431,25 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk); struct mptcp_subflow_request_sock *subflow_req; struct mptcp_options_received mp_opt; - bool fallback_is_fatal = false; + bool fallback, fallback_is_fatal; struct sock *new_msk = NULL; - bool fallback = false; struct sock *child; pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn); - /* we need later a valid 'mp_capable' value even when options are not - * parsed 
+ /* After child creation we must look for 'mp_capable' even when options + * are not parsed */ mp_opt.mp_capable = 0; - if (tcp_rsk(req)->is_mptcp == 0) + + /* hopefully temporary handling for MP_JOIN+syncookie */ + subflow_req = mptcp_subflow_rsk(req); + fallback_is_fatal = subflow_req->mp_join; + fallback = !tcp_rsk(req)->is_mptcp; + if (fallback) goto create_child; /* if the sk is MP_CAPABLE, we try to fetch the client key */ - subflow_req = mptcp_subflow_rsk(req); if (subflow_req->mp_capable) { if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) { /* here we can receive and accept an in-window, @@ -474,12 +470,11 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, if (!new_msk) fallback = true; } else if (subflow_req->mp_join) { - fallback_is_fatal = true; mptcp_get_options(skb, &mp_opt); if (!mp_opt.mp_join || !subflow_hmac_valid(req, &mp_opt)) { SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC); - return NULL; + fallback = true; } } @@ -522,10 +517,12 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, } else if (ctx->mp_join) { struct mptcp_sock *owner; - owner = mptcp_token_get_sock(ctx->token); + owner = subflow_req->msk; if (!owner) goto dispose_child; + /* move the msk reference ownership to the subflow */ + subflow_req->msk = NULL; ctx->conn = (struct sock *)owner; if (!mptcp_finish_join(child)) goto dispose_child; @@ -1053,8 +1050,10 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock) err = tcp_set_ulp(sf->sk, "mptcp"); release_sock(sf->sk); - if (err) + if (err) { + sock_release(sf); return err; + } /* the newly created socket really belongs to the owning MPTCP master * socket, even if for additional subflows the allocation is performed diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 340cb955af25..56621d6bfd29 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -460,6 +460,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len, for (id = 0; id < IPSET_EXT_ID_MAX; id++) { if (!add_extension(id, cadt_flags, tb)) continue; + if (align < ip_set_extensions[id].align) + align = ip_set_extensions[id].align; len = ALIGN(len, ip_set_extensions[id].align); set->offset[id] = len; set->extensions |= ip_set_extensions[id].type; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index d7bd8b1f27d5..832eabecfbdd 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -939,7 +939,8 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family) filter->mark.mask = 0xffffffff; } } else if (cda[CTA_MARK_MASK]) { - return ERR_PTR(-EINVAL); + err = -EINVAL; + goto err_filter; } #endif if (!cda[CTA_FILTER]) @@ -947,15 +948,17 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family) err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone); if (err < 0) - return ERR_PTR(err); + goto err_filter; err = ctnetlink_parse_filter(cda[CTA_FILTER], filter); if (err < 0) - return ERR_PTR(err); + goto err_filter; if (filter->orig_flags) { - if (!cda[CTA_TUPLE_ORIG]) - return ERR_PTR(-EINVAL); + if (!cda[CTA_TUPLE_ORIG]) { + err = -EINVAL; + goto err_filter; + } err = ctnetlink_parse_tuple_filter(cda, &filter->orig, CTA_TUPLE_ORIG, @@ -963,23 +966,32 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family) &filter->zone, filter->orig_flags); if (err < 0) - return ERR_PTR(err); + goto err_filter; } if (filter->reply_flags) { - if 
(!cda[CTA_TUPLE_REPLY]) - return ERR_PTR(-EINVAL); + if (!cda[CTA_TUPLE_REPLY]) { + err = -EINVAL; + goto err_filter; + } err = ctnetlink_parse_tuple_filter(cda, &filter->reply, CTA_TUPLE_REPLY, filter->family, &filter->zone, filter->orig_flags); - if (err < 0) - return ERR_PTR(err); + if (err < 0) { + err = -EINVAL; + goto err_filter; + } } return filter; + +err_filter: + kfree(filter); + + return ERR_PTR(err); } static bool ctnetlink_needs_filter(u8 family, const struct nlattr * const *cda) diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c index f108a76925dd..2b01a151eaa8 100644 --- a/net/netfilter/nf_dup_netdev.c +++ b/net/netfilter/nf_dup_netdev.c @@ -73,3 +73,4 @@ EXPORT_SYMBOL_GPL(nft_fwd_dup_netdev_offload); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); +MODULE_DESCRIPTION("Netfilter packet duplication support"); diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index 6a3034f84ab6..b1eb5272b379 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -387,51 +387,6 @@ static void nf_flow_offload_work_gc(struct work_struct *work) queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ); } -int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table, - flow_setup_cb_t *cb, void *cb_priv) -{ - struct flow_block *block = &flow_table->flow_block; - struct flow_block_cb *block_cb; - int err = 0; - - down_write(&flow_table->flow_block_lock); - block_cb = flow_block_cb_lookup(block, cb, cb_priv); - if (block_cb) { - err = -EEXIST; - goto unlock; - } - - block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL); - if (IS_ERR(block_cb)) { - err = PTR_ERR(block_cb); - goto unlock; - } - - list_add_tail(&block_cb->list, &block->cb_list); - -unlock: - up_write(&flow_table->flow_block_lock); - return err; -} -EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb); - -void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table, - flow_setup_cb_t *cb, void *cb_priv) -{ - struct flow_block *block = &flow_table->flow_block; - struct flow_block_cb *block_cb; - - down_write(&flow_table->flow_block_lock); - block_cb = flow_block_cb_lookup(block, cb, cb_priv); - if (block_cb) { - list_del(&block_cb->list); - flow_block_cb_free(block_cb); - } else { - WARN_ON(true); - } - up_write(&flow_table->flow_block_lock); -} -EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb); static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff, __be16 port, __be16 new_port) @@ -639,3 +594,4 @@ module_exit(nf_flow_table_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); +MODULE_DESCRIPTION("Netfilter flow table module"); diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c index 88bedf1ff1ae..bc4126d8ef65 100644 --- a/net/netfilter/nf_flow_table_inet.c +++ b/net/netfilter/nf_flow_table_inet.c @@ -72,3 +72,4 @@ module_exit(nf_flow_inet_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); MODULE_ALIAS_NF_FLOWTABLE(1); /* NFPROTO_INET */ +MODULE_DESCRIPTION("Netfilter flow table mixed IPv4/IPv6 module"); diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index 62651e6683f6..5fff1e040168 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -950,6 +950,7 @@ static void nf_flow_table_indr_cleanup(struct flow_block_cb *block_cb) nf_flow_table_gc_cleanup(flowtable, dev); down_write(&flowtable->flow_block_lock); list_del(&block_cb->list); + 
list_del(&block_cb->driver_list); flow_block_cb_free(block_cb); up_write(&flowtable->flow_block_lock); } diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index b9cbe1e2453e..ebcdc8e54476 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -1237,3 +1237,4 @@ EXPORT_SYMBOL_GPL(nf_synproxy_ipv6_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy "); +MODULE_DESCRIPTION("nftables SYNPROXY expression support"); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 073aa1051d43..7647ecfa0d40 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -6550,12 +6550,22 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, return err; } +static void nft_flowtable_hook_release(struct nft_flowtable_hook *flowtable_hook) +{ + struct nft_hook *this, *next; + + list_for_each_entry_safe(this, next, &flowtable_hook->list, list) { + list_del(&this->list); + kfree(this); + } +} + static int nft_delflowtable_hook(struct nft_ctx *ctx, struct nft_flowtable *flowtable) { const struct nlattr * const *nla = ctx->nla; struct nft_flowtable_hook flowtable_hook; - struct nft_hook *this, *next, *hook; + struct nft_hook *this, *hook; struct nft_trans *trans; int err; @@ -6564,33 +6574,40 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, if (err < 0) return err; - list_for_each_entry_safe(this, next, &flowtable_hook.list, list) { + list_for_each_entry(this, &flowtable_hook.list, list) { hook = nft_hook_list_find(&flowtable->hook_list, this); if (!hook) { err = -ENOENT; goto err_flowtable_del_hook; } hook->inactive = true; - list_del(&this->list); - kfree(this); } trans = nft_trans_alloc(ctx, NFT_MSG_DELFLOWTABLE, sizeof(struct nft_trans_flowtable)); - if (!trans) - return -ENOMEM; + if (!trans) { + err = -ENOMEM; + goto err_flowtable_del_hook; + } nft_trans_flowtable(trans) = flowtable; nft_trans_flowtable_update(trans) = true; INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans)); + nft_flowtable_hook_release(&flowtable_hook); list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; err_flowtable_del_hook: - list_for_each_entry(hook, &flowtable_hook.list, list) + list_for_each_entry(this, &flowtable_hook.list, list) { + hook = nft_hook_list_find(&flowtable->hook_list, this); + if (!hook) + break; + hook->inactive = false; + } + nft_flowtable_hook_release(&flowtable_hook); return err; } diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index 185fc82c99aa..c7cf1cde46de 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -296,6 +296,7 @@ static void nft_indr_block_cleanup(struct flow_block_cb *block_cb) nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND, basechain, &extack); mutex_lock(&net->nft.commit_mutex); + list_del(&block_cb->driver_list); list_move(&block_cb->list, &bo.cb_list); nft_flow_offload_unbind(&bo, basechain); mutex_unlock(&net->nft.commit_mutex); diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 99127e2d95a8..5f24edf95830 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -33,6 +33,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte "); MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER); +MODULE_DESCRIPTION("Netfilter messages via netlink socket"); #define nfnl_dereference_protected(id) \ rcu_dereference_protected(table[(id)].subsys, \ diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 
f9adca62ccb3..aa1a066cb74b 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -902,3 +902,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); MODULE_ALIAS_NFT_EXPR("match"); MODULE_ALIAS_NFT_EXPR("target"); +MODULE_DESCRIPTION("x_tables over nftables support"); diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c index 69d6173f91e2..7d0761fad37e 100644 --- a/net/netfilter/nft_connlimit.c +++ b/net/netfilter/nft_connlimit.c @@ -280,3 +280,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso"); MODULE_ALIAS_NFT_EXPR("connlimit"); MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CONNLIMIT); +MODULE_DESCRIPTION("nftables connlimit rule support"); diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c index f6d4d0fa23a6..85ed461ec24e 100644 --- a/net/netfilter/nft_counter.c +++ b/net/netfilter/nft_counter.c @@ -303,3 +303,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy "); MODULE_ALIAS_NFT_EXPR("counter"); MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_COUNTER); +MODULE_DESCRIPTION("nftables counter rule support"); diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index faea72c2df32..77258af1fce0 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -1345,3 +1345,4 @@ MODULE_ALIAS_NFT_EXPR("notrack"); MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_HELPER); MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_TIMEOUT); MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_EXPECT); +MODULE_DESCRIPTION("Netfilter nf_tables conntrack module"); diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c index c2e78c160fd7..40788b3f1071 100644 --- a/net/netfilter/nft_dup_netdev.c +++ b/net/netfilter/nft_dup_netdev.c @@ -102,3 +102,4 @@ module_exit(nft_dup_netdev_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); MODULE_ALIAS_NFT_AF_EXPR(5, "dup"); +MODULE_DESCRIPTION("nftables netdev packet duplication support"); diff --git a/net/netfilter/nft_fib_inet.c b/net/netfilter/nft_fib_inet.c index 465432e0531b..a88d44e163d1 100644 --- a/net/netfilter/nft_fib_inet.c +++ b/net/netfilter/nft_fib_inet.c @@ -76,3 +76,4 @@ module_exit(nft_fib_inet_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Florian Westphal "); MODULE_ALIAS_NFT_AF_EXPR(1, "fib"); +MODULE_DESCRIPTION("nftables fib inet support"); diff --git a/net/netfilter/nft_fib_netdev.c b/net/netfilter/nft_fib_netdev.c index a2e726ae7f07..3f3478abd845 100644 --- a/net/netfilter/nft_fib_netdev.c +++ b/net/netfilter/nft_fib_netdev.c @@ -85,3 +85,4 @@ module_exit(nft_fib_netdev_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo M. 
Bermudo Garay "); MODULE_ALIAS_NFT_AF_EXPR(5, "fib"); +MODULE_DESCRIPTION("nftables netdev fib lookups support"); diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index b70b48996801..3b9b97aa4b32 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -286,3 +286,4 @@ module_exit(nft_flow_offload_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); MODULE_ALIAS_NFT_EXPR("flow_offload"); +MODULE_DESCRIPTION("nftables hardware flow offload module"); diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index b836d550b919..96371d878e7e 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -248,3 +248,4 @@ module_exit(nft_hash_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Laura Garcia "); MODULE_ALIAS_NFT_EXPR("hash"); +MODULE_DESCRIPTION("Netfilter nftables hash module"); diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c index 35b67d7e3694..0e2c315c3b5e 100644 --- a/net/netfilter/nft_limit.c +++ b/net/netfilter/nft_limit.c @@ -372,3 +372,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy "); MODULE_ALIAS_NFT_EXPR("limit"); MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_LIMIT); +MODULE_DESCRIPTION("nftables limit expression support"); diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c index fe4831f2258f..57899454a530 100644 --- a/net/netfilter/nft_log.c +++ b/net/netfilter/nft_log.c @@ -298,3 +298,4 @@ module_exit(nft_log_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy "); MODULE_ALIAS_NFT_EXPR("log"); +MODULE_DESCRIPTION("Netfilter nf_tables log module"); diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c index bc9fd98c5d6d..71390b727040 100644 --- a/net/netfilter/nft_masq.c +++ b/net/netfilter/nft_masq.c @@ -305,3 +305,4 @@ module_exit(nft_masq_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arturo Borrero Gonzalez "); MODULE_ALIAS_NFT_EXPR("masq"); +MODULE_DESCRIPTION("Netfilter nftables masquerade expression support"); diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 23a7bfd10521..4bcf33b049c4 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -402,3 +402,4 @@ module_exit(nft_nat_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Tomasz Bursztyka "); MODULE_ALIAS_NFT_EXPR("nat"); +MODULE_DESCRIPTION("Network Address Translation support"); diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c index 48edb9d5f012..f1fc824f9737 100644 --- a/net/netfilter/nft_numgen.c +++ b/net/netfilter/nft_numgen.c @@ -217,3 +217,4 @@ module_exit(nft_ng_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Laura Garcia "); MODULE_ALIAS_NFT_EXPR("numgen"); +MODULE_DESCRIPTION("nftables number generator module"); diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index bfd18d2b65a2..5f9207a9f485 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c @@ -252,3 +252,4 @@ module_exit(nft_objref_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); MODULE_ALIAS_NFT_EXPR("objref"); +MODULE_DESCRIPTION("nftables stateful object reference module"); diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c index b42247aa48a9..c261d57a666a 100644 --- a/net/netfilter/nft_osf.c +++ b/net/netfilter/nft_osf.c @@ -149,3 +149,4 @@ module_exit(nft_osf_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Fernando Fernandez "); MODULE_ALIAS_NFT_EXPR("osf"); +MODULE_DESCRIPTION("nftables passive OS fingerprint support"); diff --git 
a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c index 5ece0a6aa8c3..23265d757acb 100644 --- a/net/netfilter/nft_queue.c +++ b/net/netfilter/nft_queue.c @@ -216,3 +216,4 @@ module_exit(nft_queue_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Eric Leblond "); MODULE_ALIAS_NFT_EXPR("queue"); +MODULE_DESCRIPTION("Netfilter nftables queue module"); diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c index 4413690591f2..0363f533a42b 100644 --- a/net/netfilter/nft_quota.c +++ b/net/netfilter/nft_quota.c @@ -254,3 +254,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); MODULE_ALIAS_NFT_EXPR("quota"); MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_QUOTA); +MODULE_DESCRIPTION("Netfilter nftables quota module"); diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c index 5b779171565c..2056051c0af0 100644 --- a/net/netfilter/nft_redir.c +++ b/net/netfilter/nft_redir.c @@ -292,3 +292,4 @@ module_exit(nft_redir_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arturo Borrero Gonzalez "); MODULE_ALIAS_NFT_EXPR("redir"); +MODULE_DESCRIPTION("Netfilter nftables redirect support"); diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c index 00f865fb80ca..86eafbb0fdd0 100644 --- a/net/netfilter/nft_reject.c +++ b/net/netfilter/nft_reject.c @@ -119,3 +119,4 @@ EXPORT_SYMBOL_GPL(nft_reject_icmpv6_code); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy "); +MODULE_DESCRIPTION("Netfilter x_tables over nftables module"); diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c index f41f414b72d1..cf8f2646e93c 100644 --- a/net/netfilter/nft_reject_inet.c +++ b/net/netfilter/nft_reject_inet.c @@ -149,3 +149,4 @@ module_exit(nft_reject_inet_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy "); MODULE_ALIAS_NFT_AF_EXPR(1, "reject"); +MODULE_DESCRIPTION("Netfilter nftables reject inet support"); diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 8b5acc6910fd..8c04388296b0 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -1242,7 +1242,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f); } - if (!*this_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) { + if (!*get_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) { + put_cpu_ptr(m->scratch); + err = pipapo_realloc_scratch(m, bsize_max); if (err) return err; @@ -1250,6 +1252,8 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, this_cpu_write(nft_pipapo_scratch_index, false); m->bsize_max = bsize_max; + } else { + put_cpu_ptr(m->scratch); } *ext2 = &e->ext; diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 62f416bc0579..b6aad3fc46c3 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -271,12 +271,14 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, if (nft_rbtree_interval_start(new)) { if (nft_rbtree_interval_end(rbe) && - nft_set_elem_active(&rbe->ext, genmask)) + nft_set_elem_active(&rbe->ext, genmask) && + !nft_set_elem_expired(&rbe->ext)) overlap = false; } else { overlap = nft_rbtree_interval_end(rbe) && nft_set_elem_active(&rbe->ext, - genmask); + genmask) && + !nft_set_elem_expired(&rbe->ext); } } else if (d > 0) { p = &parent->rb_right; @@ -284,9 +286,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, if (nft_rbtree_interval_end(new)) { overlap = 
nft_rbtree_interval_end(rbe) && nft_set_elem_active(&rbe->ext, - genmask); + genmask) && + !nft_set_elem_expired(&rbe->ext); } else if (nft_rbtree_interval_end(rbe) && - nft_set_elem_active(&rbe->ext, genmask)) { + nft_set_elem_active(&rbe->ext, genmask) && + !nft_set_elem_expired(&rbe->ext)) { overlap = true; } } else { @@ -294,15 +298,18 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, nft_rbtree_interval_start(new)) { p = &parent->rb_left; - if (nft_set_elem_active(&rbe->ext, genmask)) + if (nft_set_elem_active(&rbe->ext, genmask) && + !nft_set_elem_expired(&rbe->ext)) overlap = false; } else if (nft_rbtree_interval_start(rbe) && nft_rbtree_interval_end(new)) { p = &parent->rb_right; - if (nft_set_elem_active(&rbe->ext, genmask)) + if (nft_set_elem_active(&rbe->ext, genmask) && + !nft_set_elem_expired(&rbe->ext)) overlap = false; - } else if (nft_set_elem_active(&rbe->ext, genmask)) { + } else if (nft_set_elem_active(&rbe->ext, genmask) && + !nft_set_elem_expired(&rbe->ext)) { *ext = &rbe->ext; return -EEXIST; } else { diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c index e2c1fc608841..4fda8b3f1762 100644 --- a/net/netfilter/nft_synproxy.c +++ b/net/netfilter/nft_synproxy.c @@ -388,3 +388,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Fernando Fernandez "); MODULE_ALIAS_NFT_EXPR("synproxy"); MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_SYNPROXY); +MODULE_DESCRIPTION("nftables SYNPROXY expression support"); diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c index 30be5787fbde..d3eb953d0333 100644 --- a/net/netfilter/nft_tunnel.c +++ b/net/netfilter/nft_tunnel.c @@ -719,3 +719,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso "); MODULE_ALIAS_NFT_EXPR("tunnel"); MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL); +MODULE_DESCRIPTION("nftables tunnel expression support"); diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c index a8e5f6c8db7a..b4f7bbc3f3ca 100644 --- a/net/netfilter/xt_nat.c +++ b/net/netfilter/xt_nat.c @@ -244,3 +244,4 @@ MODULE_ALIAS("ipt_SNAT"); MODULE_ALIAS("ipt_DNAT"); MODULE_ALIAS("ip6t_SNAT"); MODULE_ALIAS("ip6t_DNAT"); +MODULE_DESCRIPTION("SNAT and DNAT targets support"); diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index fc0efd8833c8..2611657f40ca 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -1169,9 +1169,10 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb, struct sw_flow_key *key, const struct nlattr *attr, bool last) { + struct ovs_skb_cb *ovs_cb = OVS_CB(skb); const struct nlattr *actions, *cpl_arg; + int len, max_len, rem = nla_len(attr); const struct check_pkt_len_arg *arg; - int rem = nla_len(attr); bool clone_flow_key; /* The first netlink attribute in 'attr' is always @@ -1180,7 +1181,11 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb, cpl_arg = nla_data(attr); arg = nla_data(cpl_arg); - if (skb->len <= arg->pkt_len) { + len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len; + max_len = arg->pkt_len; + + if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) || + len <= max_len) { /* Second netlink attribute in 'attr' is always * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'. 
*/ diff --git a/net/rds/ib.h b/net/rds/ib.h index 5ae069d39eab..8dfff43cf07f 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -264,7 +264,13 @@ struct rds_ib_device { int *vector_load; }; -#define ibdev_to_node(ibdev) dev_to_node((ibdev)->dev.parent) +static inline int ibdev_to_node(struct ib_device *ibdev) +{ + struct device *parent; + + parent = ibdev->dev.parent; + return parent ? dev_to_node(parent) : NUMA_NO_NODE; +} #define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev) /* bits for i_ack_flags */ diff --git a/net/rds/transport.c b/net/rds/transport.c index 46f709a4b577..f8001ec80867 100644 --- a/net/rds/transport.c +++ b/net/rds/transport.c @@ -38,6 +38,12 @@ #include "rds.h" #include "loop.h" +static char * const rds_trans_modules[] = { + [RDS_TRANS_IB] = "rds_rdma", + [RDS_TRANS_GAP] = NULL, + [RDS_TRANS_TCP] = "rds_tcp", +}; + static struct rds_transport *transports[RDS_TRANS_COUNT]; static DECLARE_RWSEM(rds_trans_sem); @@ -110,18 +116,20 @@ struct rds_transport *rds_trans_get(int t_type) { struct rds_transport *ret = NULL; struct rds_transport *trans; - unsigned int i; down_read(&rds_trans_sem); - for (i = 0; i < RDS_TRANS_COUNT; i++) { - trans = transports[i]; - - if (trans && trans->t_type == t_type && - (!trans->t_owner || try_module_get(trans->t_owner))) { - ret = trans; - break; - } + trans = transports[t_type]; + if (!trans) { + up_read(&rds_trans_sem); + if (rds_trans_modules[t_type]) + request_module(rds_trans_modules[t_type]); + down_read(&rds_trans_sem); + trans = transports[t_type]; } + if (trans && trans->t_type == t_type && + (!trans->t_owner || try_module_get(trans->t_owner))) + ret = trans; + up_read(&rds_trans_sem); return ret; diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index b7611cc159e5..032ed76c0166 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -22,6 +22,11 @@ #include #include "ar-internal.h" +static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call, + unsigned long user_call_ID) +{ +} + /* * Preallocate a single service call, connection and peer and, if possible, * give them a user ID and attach the user's side of the ID to them. 
@@ -228,6 +233,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx) if (rx->discard_new_call) { _debug("discard %lx", call->user_call_ID); rx->discard_new_call(call, call->user_call_ID); + if (call->notify_rx) + call->notify_rx = rxrpc_dummy_notify; rxrpc_put_call(call, rxrpc_call_put_kernel); } rxrpc_call_completed(call); diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index aa1c8eee6557..6be2672a65ea 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -253,7 +253,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) * confuse things */ annotation &= ~RXRPC_TX_ANNO_MASK; - annotation |= RXRPC_TX_ANNO_RESENT; + annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT; call->rxtx_annotations[ix] = annotation; skb = call->rxtx_buffer[ix]; diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 299ac98e9754..767579328a06 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -722,13 +722,12 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU), rwind, ntohl(ackinfo->jumbo_max)); + if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) + rwind = RXRPC_RXTX_BUFF_SIZE - 1; if (call->tx_winsize != rwind) { - if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) - rwind = RXRPC_RXTX_BUFF_SIZE - 1; if (rwind > call->tx_winsize) wake = true; - trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, - ntohl(ackinfo->rwind), wake); + trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake); call->tx_winsize = rwind; } diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index e29f0f45d688..e9f3576cbf71 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -1543,17 +1543,6 @@ static void __exit ct_cleanup_module(void) destroy_workqueue(act_ct_wq); } -void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie) -{ - enum ip_conntrack_info ctinfo = cookie & NFCT_INFOMASK; - struct nf_conn *ct; - - ct = (struct nf_conn *)(cookie & NFCT_PTRMASK); - nf_conntrack_get(&ct->ct_general); - nf_ct_set(skb, ct, ctinfo); -} -EXPORT_SYMBOL_GPL(tcf_ct_flow_table_restore_skb); - module_init(ct_init_module); module_exit(ct_cleanup_module); MODULE_AUTHOR("Paul Blakey "); diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c index 9c628591f452..323ae7f6315d 100644 --- a/net/sched/act_gate.c +++ b/net/sched/act_gate.c @@ -32,7 +32,7 @@ static ktime_t gate_get_time(struct tcf_gate *gact) return KTIME_MAX; } -static int gate_get_start_time(struct tcf_gate *gact, ktime_t *start) +static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start) { struct tcf_gate_params *param = &gact->param; ktime_t now, base, cycle; @@ -43,18 +43,13 @@ static int gate_get_start_time(struct tcf_gate *gact, ktime_t *start) if (ktime_after(base, now)) { *start = base; - return 0; + return; } cycle = param->tcfg_cycletime; - /* cycle time should not be zero */ - if (!cycle) - return -EFAULT; - n = div64_u64(ktime_sub_ns(now, base), cycle); *start = ktime_add_ns(base, (n + 1) * cycle); - return 0; } static void gate_start_timer(struct tcf_gate *gact, ktime_t start) @@ -277,6 +272,27 @@ static int parse_gate_list(struct nlattr *list_attr, return err; } +static void gate_setup_timer(struct tcf_gate *gact, u64 basetime, + enum tk_offsets tko, s32 clockid, + bool do_init) +{ + if (!do_init) { + if (basetime == gact->param.tcfg_basetime && + tko == gact->tk_offset && + clockid == gact->param.tcfg_clockid) + return; + + spin_unlock_bh(&gact->tcf_lock); + hrtimer_cancel(&gact->hitimer); + spin_lock_bh(&gact->tcf_lock); + } + 
gact->param.tcfg_basetime = basetime; + gact->param.tcfg_clockid = clockid; + gact->tk_offset = tko; + hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT); + gact->hitimer.function = gate_timer_func; +} + static int tcf_gate_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, int bind, bool rtnl_held, @@ -287,12 +303,12 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla, enum tk_offsets tk_offset = TK_OFFS_TAI; struct nlattr *tb[TCA_GATE_MAX + 1]; struct tcf_chain *goto_ch = NULL; + u64 cycletime = 0, basetime = 0; struct tcf_gate_params *p; s32 clockid = CLOCK_TAI; struct tcf_gate *gact; struct tc_gate *parm; int ret = 0, err; - u64 basetime = 0; u32 gflags = 0; s32 prio = -1; ktime_t start; @@ -308,6 +324,27 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla, if (!tb[TCA_GATE_PARMS]) return -EINVAL; + if (tb[TCA_GATE_CLOCKID]) { + clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]); + switch (clockid) { + case CLOCK_REALTIME: + tk_offset = TK_OFFS_REAL; + break; + case CLOCK_MONOTONIC: + tk_offset = TK_OFFS_MAX; + break; + case CLOCK_BOOTTIME: + tk_offset = TK_OFFS_BOOT; + break; + case CLOCK_TAI: + tk_offset = TK_OFFS_TAI; + break; + default: + NL_SET_ERR_MSG(extack, "Invalid 'clockid'"); + return -EINVAL; + } + } + parm = nla_data(tb[TCA_GATE_PARMS]); index = parm->index; @@ -331,10 +368,6 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla, tcf_idr_release(*a, bind); return -EEXIST; } - if (ret == ACT_P_CREATED) { - to_gate(*a)->param.tcfg_clockid = -1; - INIT_LIST_HEAD(&(to_gate(*a)->param.entries)); - } if (tb[TCA_GATE_PRIORITY]) prio = nla_get_s32(tb[TCA_GATE_PRIORITY]); @@ -345,41 +378,19 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla, if (tb[TCA_GATE_FLAGS]) gflags = nla_get_u32(tb[TCA_GATE_FLAGS]); - if (tb[TCA_GATE_CLOCKID]) { - clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]); - switch (clockid) { - case CLOCK_REALTIME: - tk_offset = TK_OFFS_REAL; - break; - case CLOCK_MONOTONIC: - tk_offset = TK_OFFS_MAX; - break; - case CLOCK_BOOTTIME: - tk_offset = TK_OFFS_BOOT; - break; - case CLOCK_TAI: - tk_offset = TK_OFFS_TAI; - break; - default: - NL_SET_ERR_MSG(extack, "Invalid 'clockid'"); - goto release_idr; - } - } + gact = to_gate(*a); + if (ret == ACT_P_CREATED) + INIT_LIST_HEAD(&gact->param.entries); err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; - gact = to_gate(*a); - spin_lock_bh(&gact->tcf_lock); p = &gact->param; - if (tb[TCA_GATE_CYCLE_TIME]) { - p->tcfg_cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]); - if (!p->tcfg_cycletime_ext) - goto chain_put; - } + if (tb[TCA_GATE_CYCLE_TIME]) + cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]); if (tb[TCA_GATE_ENTRY_LIST]) { err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack); @@ -387,35 +398,29 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla, goto chain_put; } - if (!p->tcfg_cycletime) { + if (!cycletime) { struct tcfg_gate_entry *entry; ktime_t cycle = 0; list_for_each_entry(entry, &p->entries, list) cycle = ktime_add_ns(cycle, entry->interval); - p->tcfg_cycletime = cycle; + cycletime = cycle; + if (!cycletime) { + err = -EINVAL; + goto chain_put; + } } + p->tcfg_cycletime = cycletime; if (tb[TCA_GATE_CYCLE_TIME_EXT]) p->tcfg_cycletime_ext = nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]); + gate_setup_timer(gact, basetime, tk_offset, clockid, + ret == ACT_P_CREATED); p->tcfg_priority = prio; - p->tcfg_basetime = basetime; - p->tcfg_clockid = clockid; p->tcfg_flags = 
gflags; - - gact->tk_offset = tk_offset; - hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT); - gact->hitimer.function = gate_timer_func; - - err = gate_get_start_time(gact, &start); - if (err < 0) { - NL_SET_ERR_MSG(extack, - "Internal error: failed get start time"); - release_entry_list(&p->entries); - goto chain_put; - } + gate_get_start_time(gact, &start); gact->current_close_time = start; gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING; @@ -443,6 +448,13 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla, if (goto_ch) tcf_chain_put_by_act(goto_ch); release_idr: + /* action is not inserted in any list: it's safe to init hitimer + * without taking tcf_lock. + */ + if (ret == ACT_P_CREATED) + gate_setup_timer(gact, gact->param.tcfg_basetime, + gact->tk_offset, gact->param.tcfg_clockid, + true); tcf_idr_release(*a, bind); return err; } @@ -453,9 +465,7 @@ static void tcf_gate_cleanup(struct tc_action *a) struct tcf_gate_params *p; p = &gact->param; - if (p->tcfg_clockid != -1) - hrtimer_cancel(&gact->hitimer); - + hrtimer_cancel(&gact->hitimer); release_entry_list(&p->entries); } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index a00a203b2ef5..faa78b7dd962 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -652,6 +652,7 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb) &block->flow_block, tcf_block_shared(block), &extack); down_write(&block->cb_lock); + list_del(&block_cb->driver_list); list_move(&block_cb->list, &bo.cb_list); up_write(&block->cb_lock); rtnl_lock(); @@ -671,25 +672,29 @@ static int tcf_block_offload_cmd(struct tcf_block *block, struct netlink_ext_ack *extack) { struct flow_block_offload bo = {}; - int err; tcf_block_offload_init(&bo, dev, command, ei->binder_type, &block->flow_block, tcf_block_shared(block), extack); - if (dev->netdev_ops->ndo_setup_tc) - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo); - else - err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block, - &bo, tc_block_indr_cleanup); + if (dev->netdev_ops->ndo_setup_tc) { + int err; - if (err < 0) { - if (err != -EOPNOTSUPP) - NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed"); - return err; + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo); + if (err < 0) { + if (err != -EOPNOTSUPP) + NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed"); + return err; + } + + return tcf_block_setup(block, &bo); } - return tcf_block_setup(block, &bo); + flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block, &bo, + tc_block_indr_cleanup); + tcf_block_setup(block, &bo); + + return -EOPNOTSUPP; } static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 60f8ae578819..ca813697728e 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -1551,32 +1551,51 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) return idx + (tin << 16); } -static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash) +static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash) { - int wlen = skb_network_offset(skb); + const int offset = skb_network_offset(skb); + u16 *buf, buf_; u8 dscp; switch (tc_skb_protocol(skb)) { case htons(ETH_P_IP): - wlen += sizeof(struct iphdr); - if (!pskb_may_pull(skb, wlen) || - skb_try_make_writable(skb, wlen)) + buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_); + if (unlikely(!buf)) return 0; - dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; - if (wash && dscp) + /* ToS is in the 
second byte of iphdr */ + dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2; + + if (wash && dscp) { + const int wlen = offset + sizeof(struct iphdr); + + if (!pskb_may_pull(skb, wlen) || + skb_try_make_writable(skb, wlen)) + return 0; + ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0); + } + return dscp; case htons(ETH_P_IPV6): - wlen += sizeof(struct ipv6hdr); - if (!pskb_may_pull(skb, wlen) || - skb_try_make_writable(skb, wlen)) + buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_); + if (unlikely(!buf)) return 0; - dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; - if (wash && dscp) + /* Traffic class is in the first and second bytes of ipv6hdr */ + dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2; + + if (wash && dscp) { + const int wlen = offset + sizeof(struct ipv6hdr); + + if (!pskb_may_pull(skb, wlen) || + skb_try_make_writable(skb, wlen)) + return 0; + ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0); + } + return dscp; case htons(ETH_P_ARP): @@ -1593,14 +1612,17 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch, { struct cake_sched_data *q = qdisc_priv(sch); u32 tin, mark; + bool wash; u8 dscp; /* Tin selection: Default to diffserv-based selection, allow overriding - * using firewall marks or skb->priority. + * using firewall marks or skb->priority. Call DSCP parsing early if + * wash is enabled, otherwise defer to below to skip unneeded parsing. */ - dscp = cake_handle_diffserv(skb, - q->rate_flags & CAKE_FLAG_WASH); mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft; + wash = !!(q->rate_flags & CAKE_FLAG_WASH); + if (wash) + dscp = cake_handle_diffserv(skb, wash); if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT) tin = 0; @@ -1614,6 +1636,8 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch, tin = q->tin_order[TC_H_MIN(skb->priority) - 1]; else { + if (!wash) + dscp = cake_handle_diffserv(skb, wash); tin = q->tin_index[dscp]; if (unlikely(tin >= q->tin_cnt)) @@ -2691,7 +2715,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt, qdisc_watchdog_init(&q->watchdog, sch); if (opt) { - int err = cake_change(sch, opt, extack); + err = cake_change(sch, opt, extack); if (err) return err; @@ -3008,7 +3032,7 @@ static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl, PUT_STAT_S32(BLUE_TIMER_US, ktime_to_us( ktime_sub(now, - flow->cvars.blue_timer))); + flow->cvars.blue_timer))); } if (flow->cvars.dropping) { PUT_STAT_S32(DROP_NEXT_US, diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 8f06a808c59a..2fb76fc0cc31 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -1075,3 +1075,4 @@ module_init(fq_module_init) module_exit(fq_module_exit) MODULE_AUTHOR("Eric Dumazet"); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Fair Queue Packet Scheduler"); diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 436160be9c18..459a784056c0 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -721,3 +721,4 @@ module_init(fq_codel_module_init) module_exit(fq_codel_module_exit) MODULE_AUTHOR("Eric Dumazet"); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Fair Queue CoDel discipline"); diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index be35f03b657b..420ede875322 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -721,3 +721,4 @@ module_exit(hhf_module_exit) MODULE_AUTHOR("Terry Lam"); MODULE_AUTHOR("Nandita Dukkipati"); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Heavy-Hitter Filter (HHF)"); diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 72315137d7e7..8d735461fa19 
100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1565,12 +1565,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, enum sctp_scope scope, gfp_t gfp) { + struct sock *sk = asoc->base.sk; int flags; /* Use scoping rules to determine the subset of addresses from * the endpoint. */ - flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; + flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; + if (!inet_v6_ipv6only(sk)) + flags |= SCTP_ADDR4_ALLOWED; if (asoc->peer.ipv4_address) flags |= SCTP_ADDR4_PEERSUPP; if (asoc->peer.ipv6_address) diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index 53bc61537f44..701c5a4e441d 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -461,6 +461,7 @@ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest, * well as the remote peer. */ if ((((AF_INET == addr->sa.sa_family) && + (flags & SCTP_ADDR4_ALLOWED) && (flags & SCTP_ADDR4_PEERSUPP))) || (((AF_INET6 == addr->sa.sa_family) && (flags & SCTP_ADDR6_ALLOWED) && diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 092d1afdee0d..cde29f3c7fb3 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -148,7 +148,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp, * sock as well as the remote peer. */ if (addr->a.sa.sa_family == AF_INET && - !(copy_flags & SCTP_ADDR4_PEERSUPP)) + (!(copy_flags & SCTP_ADDR4_ALLOWED) || + !(copy_flags & SCTP_ADDR4_PEERSUPP))) continue; if (addr->a.sa.sa_family == AF_INET6 && (!(copy_flags & SCTP_ADDR6_ALLOWED) || diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 39e14d5edaf1..e9d0953522f0 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1317,6 +1317,7 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data) q.len = strlen(gssd_dummy_clnt_dir[0].name); clnt_dentry = d_hash_and_lookup(gssd_dentry, &q); if (!clnt_dentry) { + __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1); pipe_dentry = ERR_PTR(-ENOENT); goto out; } diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 6f7d82fb1eb0..be11d672b5b9 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1118,6 +1118,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->head[0].iov_len; + subbuf->head[0].iov_base = buf->head[0].iov_base; subbuf->head[0].iov_len = 0; } @@ -1130,6 +1131,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->page_len; + subbuf->pages = buf->pages; + subbuf->page_base = 0; subbuf->page_len = 0; } @@ -1141,6 +1144,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->tail[0].iov_len; + subbuf->tail[0].iov_base = buf->tail[0].iov_base; subbuf->tail[0].iov_len = 0; } diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index ef997880e17a..b647562a26dd 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -367,7 +367,7 @@ static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc) trace_xprtrdma_wc_fastreg(wc, frwr); /* The MR will get recycled when the associated req is retransmitted */ - rpcrdma_flush_disconnect(cq, wc); + rpcrdma_flush_disconnect(cq->cq_context, wc); } /** @@ -452,7 +452,7 @@ static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc) trace_xprtrdma_wc_li(wc, frwr); __frwr_release_mr(wc, mr); - 
rpcrdma_flush_disconnect(cq, wc); + rpcrdma_flush_disconnect(cq->cq_context, wc); } /** @@ -474,7 +474,7 @@ static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc) __frwr_release_mr(wc, mr); complete(&frwr->fr_linv_done); - rpcrdma_flush_disconnect(cq, wc); + rpcrdma_flush_disconnect(cq->cq_context, wc); } /** @@ -582,7 +582,7 @@ static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc) smp_rmb(); rpcrdma_complete_rqst(rep); - rpcrdma_flush_disconnect(cq, wc); + rpcrdma_flush_disconnect(cq->cq_context, wc); } /** diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 2081c8fbfa48..935bbef2f7be 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -1349,8 +1349,7 @@ rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep, be32_to_cpup(p), be32_to_cpu(rep->rr_xid)); } - r_xprt->rx_stats.bad_reply_count++; - return -EREMOTEIO; + return -EIO; } /* Perform XID lookup, reconstruction of the RPC reply, and @@ -1387,13 +1386,11 @@ void rpcrdma_complete_rqst(struct rpcrdma_rep *rep) spin_unlock(&xprt->queue_lock); return; -/* If the incoming reply terminated a pending RPC, the next - * RPC call will post a replacement receive buffer as it is - * being marshaled. - */ out_badheader: trace_xprtrdma_reply_hdr(rep); r_xprt->rx_stats.bad_reply_count++; + rqst->rq_task->tk_status = status; + status = 0; goto out; } diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 0c4af7f5e241..14165b673b20 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -242,7 +242,7 @@ xprt_rdma_connect_worker(struct work_struct *work) rc = rpcrdma_xprt_connect(r_xprt); xprt_clear_connecting(xprt); - if (r_xprt->rx_ep && r_xprt->rx_ep->re_connect_status > 0) { + if (!rc) { xprt->connect_cookie++; xprt->stat.connect_count++; xprt->stat.connect_time += (long)jiffies - diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 2ae348377806..2198c8ec8dff 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -84,7 +84,8 @@ static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep); static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt); static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt); static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt); -static int rpcrdma_ep_destroy(struct rpcrdma_ep *ep); +static void rpcrdma_ep_get(struct rpcrdma_ep *ep); +static int rpcrdma_ep_put(struct rpcrdma_ep *ep); static struct rpcrdma_regbuf * rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction, gfp_t flags); @@ -97,7 +98,8 @@ static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb); */ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt) { - struct rdma_cm_id *id = r_xprt->rx_ep->re_id; + struct rpcrdma_ep *ep = r_xprt->rx_ep; + struct rdma_cm_id *id = ep->re_id; /* Flush Receives, then wait for deferred Reply work * to complete. @@ -108,6 +110,8 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt) * local invalidations. */ ib_drain_sq(id->qp); + + rpcrdma_ep_put(ep); } /** @@ -126,23 +130,27 @@ static void rpcrdma_qp_event_handler(struct ib_event *event, void *context) trace_xprtrdma_qp_event(ep, event); } +/* Ensure xprt_force_disconnect() is invoked exactly once when a + * connection is closed or lost. (The important thing is it needs + * to be invoked "at least" once). 
+ */ +static void rpcrdma_force_disconnect(struct rpcrdma_ep *ep) +{ + if (atomic_add_unless(&ep->re_force_disconnect, 1, 1)) + xprt_force_disconnect(ep->re_xprt); +} + /** * rpcrdma_flush_disconnect - Disconnect on flushed completion - * @cq: completion queue + * @r_xprt: transport to disconnect * @wc: work completion entry * * Must be called in process context. */ -void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc) +void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc) { - struct rpcrdma_xprt *r_xprt = cq->cq_context; - struct rpc_xprt *xprt = &r_xprt->rx_xprt; - - if (wc->status != IB_WC_SUCCESS && - r_xprt->rx_ep->re_connect_status == 1) { - r_xprt->rx_ep->re_connect_status = -ECONNABORTED; - xprt_force_disconnect(xprt); - } + if (wc->status != IB_WC_SUCCESS) + rpcrdma_force_disconnect(r_xprt->rx_ep); } /** @@ -156,11 +164,12 @@ static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) struct ib_cqe *cqe = wc->wr_cqe; struct rpcrdma_sendctx *sc = container_of(cqe, struct rpcrdma_sendctx, sc_cqe); + struct rpcrdma_xprt *r_xprt = cq->cq_context; /* WARNING: Only wr_cqe and status are reliable at this point */ trace_xprtrdma_wc_send(sc, wc); - rpcrdma_sendctx_put_locked((struct rpcrdma_xprt *)cq->cq_context, sc); - rpcrdma_flush_disconnect(cq, wc); + rpcrdma_sendctx_put_locked(r_xprt, sc); + rpcrdma_flush_disconnect(r_xprt, wc); } /** @@ -195,7 +204,7 @@ static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) return; out_flushed: - rpcrdma_flush_disconnect(cq, wc); + rpcrdma_flush_disconnect(r_xprt, wc); rpcrdma_rep_destroy(rep); } @@ -239,7 +248,6 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) { struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr; struct rpcrdma_ep *ep = id->context; - struct rpc_xprt *xprt = ep->re_xprt; might_sleep(); @@ -263,10 +271,9 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) /* fall through */ case RDMA_CM_EVENT_ADDR_CHANGE: ep->re_connect_status = -ENODEV; - xprt_force_disconnect(xprt); goto disconnected; case RDMA_CM_EVENT_ESTABLISHED: - kref_get(&ep->re_kref); + rpcrdma_ep_get(ep); ep->re_connect_status = 1; rpcrdma_update_cm_private(ep, &event->param.conn); trace_xprtrdma_inline_thresh(ep); @@ -288,8 +295,8 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) case RDMA_CM_EVENT_DISCONNECTED: ep->re_connect_status = -ECONNABORTED; disconnected: - xprt_force_disconnect(xprt); - return rpcrdma_ep_destroy(ep); + rpcrdma_force_disconnect(ep); + return rpcrdma_ep_put(ep); default: break; } @@ -345,7 +352,7 @@ static struct rdma_cm_id *rpcrdma_create_id(struct rpcrdma_xprt *r_xprt, return ERR_PTR(rc); } -static void rpcrdma_ep_put(struct kref *kref) +static void rpcrdma_ep_destroy(struct kref *kref) { struct rpcrdma_ep *ep = container_of(kref, struct rpcrdma_ep, re_kref); @@ -369,13 +376,18 @@ static void rpcrdma_ep_put(struct kref *kref) module_put(THIS_MODULE); } +static noinline void rpcrdma_ep_get(struct rpcrdma_ep *ep) +{ + kref_get(&ep->re_kref); +} + /* Returns: * %0 if @ep still has a positive kref count, or * %1 if @ep was destroyed successfully. 
*/ -static int rpcrdma_ep_destroy(struct rpcrdma_ep *ep) +static noinline int rpcrdma_ep_put(struct rpcrdma_ep *ep) { - return kref_put(&ep->re_kref, rpcrdma_ep_put); + return kref_put(&ep->re_kref, rpcrdma_ep_destroy); } static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) @@ -492,7 +504,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) return 0; out_destroy: - rpcrdma_ep_destroy(ep); + rpcrdma_ep_put(ep); rdma_destroy_id(id); out_free: kfree(ep); @@ -519,10 +531,13 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt) return rc; ep = r_xprt->rx_ep; - ep->re_connect_status = 0; xprt_clear_connected(xprt); - rpcrdma_reset_cwnd(r_xprt); + + /* Bump the ep's reference count while there are + * outstanding Receives. + */ + rpcrdma_ep_get(ep); rpcrdma_post_recvs(r_xprt, true); rc = rpcrdma_sendctxs_create(r_xprt); @@ -552,8 +567,6 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt) rpcrdma_mrs_create(r_xprt); out: - if (rc) - ep->re_connect_status = rc; trace_xprtrdma_connect(r_xprt, rc); return rc; } @@ -587,7 +600,7 @@ void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt) rpcrdma_mrs_destroy(r_xprt); rpcrdma_sendctxs_destroy(r_xprt); - if (rpcrdma_ep_destroy(ep)) + if (rpcrdma_ep_put(ep)) rdma_destroy_id(id); r_xprt->rx_ep = NULL; diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 0a16fdb09b2c..43974ef39a50 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -82,6 +82,7 @@ struct rpcrdma_ep { unsigned int re_max_inline_recv; int re_async_rc; int re_connect_status; + atomic_t re_force_disconnect; struct ib_qp_init_attr re_attr; wait_queue_head_t re_connect_wait; struct rpc_xprt *re_xprt; @@ -446,7 +447,7 @@ extern unsigned int xprt_rdma_memreg_strategy; /* * Endpoint calls - xprtrdma/verbs.c */ -void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc); +void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc); int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt); void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt); diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig index b5d4a1ef04b9..5b9a5ab48111 100644 --- a/net/xfrm/Kconfig +++ b/net/xfrm/Kconfig @@ -67,6 +67,30 @@ config XFRM_STATISTICS If unsure, say N. +# This option selects XFRM_ALGO along with the AH authentication algorithms that +# RFC 8221 lists as MUST be implemented. +config XFRM_AH + tristate + select XFRM_ALGO + select CRYPTO + select CRYPTO_HMAC + select CRYPTO_SHA256 + +# This option selects XFRM_ALGO along with the ESP encryption and authentication +# algorithms that RFC 8221 lists as MUST be implemented. 
+config XFRM_ESP + tristate + select XFRM_ALGO + select CRYPTO + select CRYPTO_AES + select CRYPTO_AUTHENC + select CRYPTO_CBC + select CRYPTO_ECHAINIV + select CRYPTO_GCM + select CRYPTO_HMAC + select CRYPTO_SEQIV + select CRYPTO_SHA256 + config XFRM_IPCOMP tristate select XFRM_ALGO diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index f50d1f97cf8e..626096bd0d29 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -108,7 +108,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur struct xfrm_offload *xo = xfrm_offload(skb); struct sec_path *sp; - if (!xo) + if (!xo || (xo->flags & XFRM_XMIT)) return skb; if (!(features & NETIF_F_HW_ESP)) @@ -129,6 +129,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur return skb; } + xo->flags |= XFRM_XMIT; + if (skb_is_gso(skb)) { struct net_device *dev = skb->dev; diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index e4c23f69f69f..a7ab19353313 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -574,16 +574,12 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb) switch (x->outer_mode.family) { case AF_INET: memset(IPCB(skb), 0, sizeof(*IPCB(skb))); -#ifdef CONFIG_NETFILTER IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; -#endif break; case AF_INET6: memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); -#ifdef CONFIG_NETFILTER IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; -#endif break; } diff --git a/samples/Kconfig b/samples/Kconfig index f3ac549a53b0..0ed6e4d71d87 100644 --- a/samples/Kconfig +++ b/samples/Kconfig @@ -211,7 +211,7 @@ config SAMPLE_WATCHDOG config SAMPLE_WATCH_QUEUE bool "Build example /dev/watch_queue notification consumer" - depends on HEADERS_INSTALL + depends on CC_CAN_LINK && HEADERS_INSTALL help Build example userspace program to use the new mount_notify(), sb_notify() syscalls and the KEYCTL_WATCH_KEY keyctl() function. 
diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c index dd558cbb2309..ef53b93db573 100644 --- a/samples/bpf/xdp_monitor_user.c +++ b/samples/bpf/xdp_monitor_user.c @@ -509,11 +509,8 @@ static void *alloc_rec_per_cpu(int record_size) { unsigned int nr_cpus = bpf_num_possible_cpus(); void *array; - size_t size; - size = record_size * nr_cpus; - array = malloc(size); - memset(array, 0, size); + array = calloc(nr_cpus, record_size); if (!array) { fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus); exit(EXIT_FAIL_MEM); @@ -528,8 +525,7 @@ static struct stats_record *alloc_stats_record(void) int i; /* Alloc main stats_record structure */ - rec = malloc(sizeof(*rec)); - memset(rec, 0, sizeof(*rec)); + rec = calloc(1, sizeof(*rec)); if (!rec) { fprintf(stderr, "Mem alloc error\n"); exit(EXIT_FAIL_MEM); diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c index f3468168982e..f4e755e0dd73 100644 --- a/samples/bpf/xdp_redirect_cpu_user.c +++ b/samples/bpf/xdp_redirect_cpu_user.c @@ -207,11 +207,8 @@ static struct datarec *alloc_record_per_cpu(void) { unsigned int nr_cpus = bpf_num_possible_cpus(); struct datarec *array; - size_t size; - size = sizeof(struct datarec) * nr_cpus; - array = malloc(size); - memset(array, 0, size); + array = calloc(nr_cpus, sizeof(struct datarec)); if (!array) { fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus); exit(EXIT_FAIL_MEM); @@ -226,11 +223,11 @@ static struct stats_record *alloc_stats_record(void) size = sizeof(*rec) + n_cpus * sizeof(struct record); rec = malloc(size); - memset(rec, 0, size); if (!rec) { fprintf(stderr, "Mem alloc error\n"); exit(EXIT_FAIL_MEM); } + memset(rec, 0, size); rec->rx_cnt.cpu = alloc_record_per_cpu(); rec->redir_err.cpu = alloc_record_per_cpu(); rec->kthread.cpu = alloc_record_per_cpu(); diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c index 4fe47502ebed..caa4e7ffcfc7 100644 --- a/samples/bpf/xdp_rxq_info_user.c +++ b/samples/bpf/xdp_rxq_info_user.c @@ -198,11 +198,8 @@ static struct datarec *alloc_record_per_cpu(void) { unsigned int nr_cpus = bpf_num_possible_cpus(); struct datarec *array; - size_t size; - size = sizeof(struct datarec) * nr_cpus; - array = malloc(size); - memset(array, 0, size); + array = calloc(nr_cpus, sizeof(struct datarec)); if (!array) { fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus); exit(EXIT_FAIL_MEM); @@ -214,11 +211,8 @@ static struct record *alloc_record_per_rxq(void) { unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries; struct record *array; - size_t size; - size = sizeof(struct record) * nr_rxqs; - array = malloc(size); - memset(array, 0, size); + array = calloc(nr_rxqs, sizeof(struct record)); if (!array) { fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs); exit(EXIT_FAIL_MEM); @@ -232,8 +226,7 @@ static struct stats_record *alloc_stats_record(void) struct stats_record *rec; int i; - rec = malloc(sizeof(*rec)); - memset(rec, 0, sizeof(*rec)); + rec = calloc(1, sizeof(struct stats_record)); if (!rec) { fprintf(stderr, "Mem alloc error\n"); exit(EXIT_FAIL_MEM); diff --git a/samples/ftrace/sample-trace-array.c b/samples/ftrace/sample-trace-array.c index d523450d73eb..6aba02a31c96 100644 --- a/samples/ftrace/sample-trace-array.c +++ b/samples/ftrace/sample-trace-array.c @@ -6,6 +6,7 @@ #include #include #include +#include /* * Any file that uses trace points, must include the header. 
@@ -20,6 +21,16 @@ struct trace_array *tr; static void mytimer_handler(struct timer_list *unused); static struct task_struct *simple_tsk; +static void trace_work_fn(struct work_struct *work) +{ + /* + * Disable tracing for event "sample_event". + */ + trace_array_set_clr_event(tr, "sample-subsystem", "sample_event", + false); +} +static DECLARE_WORK(trace_work, trace_work_fn); + /* * mytimer: Timer setup to disable tracing for event "sample_event". This * timer is only for the purposes of the sample module to demonstrate access of @@ -29,11 +40,7 @@ static DEFINE_TIMER(mytimer, mytimer_handler); static void mytimer_handler(struct timer_list *unused) { - /* - * Disable tracing for event "sample_event". - */ - trace_array_set_clr_event(tr, "sample-subsystem", "sample_event", - false); + schedule_work(&trace_work); } static void simple_thread_func(int count) @@ -76,6 +83,7 @@ static int simple_thread(void *arg) simple_thread_func(count++); del_timer(&mytimer); + cancel_work_sync(&trace_work); /* * trace_array_put() decrements the reference counter associated with @@ -107,8 +115,12 @@ static int __init sample_trace_array_init(void) trace_printk_init_buffers(); simple_tsk = kthread_run(simple_thread, NULL, "sample-instance"); - if (IS_ERR(simple_tsk)) + if (IS_ERR(simple_tsk)) { + trace_array_put(tr); + trace_array_destroy(tr); return -1; + } + return 0; } diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c index 32234481ad7d..ad3e56042f96 100644 --- a/samples/mei/mei-amt-version.c +++ b/samples/mei/mei-amt-version.c @@ -267,7 +267,7 @@ struct amt_host_if_msg_header { struct amt_host_if_resp_header { struct amt_host_if_msg_header header; uint32_t status; - unsigned char data[0]; + unsigned char data[]; } __attribute__((packed)); const uuid_le MEI_IAMTHIF = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, \ diff --git a/samples/watch_queue/Makefile b/samples/watch_queue/Makefile index 8511fb6c53d2..792b22f593cf 100644 --- a/samples/watch_queue/Makefile +++ b/samples/watch_queue/Makefile @@ -1,7 +1,5 @@ -# List of programs to build -hostprogs := watch_test +# SPDX-License-Identifier: GPL-2.0-only +userprogs := watch_test +always-y := $(userprogs) -# Tell kbuild to always build the programs -always-y := $(hostprogs) - -HOSTCFLAGS_watch_test.o += -I$(objtree)/usr/include +userccflags += -I usr/include diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 0c3dc983439b..9a15fbf66aa1 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -86,20 +86,21 @@ cc-cross-prefix = $(firstword $(foreach c, $(1), \ $(if $(shell command -v -- $(c)gcc 2>/dev/null), $(c)))) # output directory for tests below -TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/) +TMPOUT = $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_$$$$ # try-run # Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise) # Exit code chooses option. "$$TMP" serves as a temporary file and is # automatically cleaned up. 
 try-run = $(shell set -e; \
- TMP="$(TMPOUT).$$$$.tmp"; \
- TMPO="$(TMPOUT).$$$$.o"; \
+ TMP=$(TMPOUT)/tmp; \
+ TMPO=$(TMPOUT)/tmp.o; \
+ mkdir -p $(TMPOUT); \
+ trap "rm -rf $(TMPOUT)" EXIT; \
 if ($(1)) >/dev/null 2>&1; \
 then echo "$(2)"; \
 else echo "$(3)"; \
- fi; \
- rm -f "$$TMP" "$$TMPO")
+ fi)
 # as-option
 # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index c264da2b9b30..a5fe72c504ff 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -25,18 +25,12 @@ failure = $(if-success,$(1),n,y)
 # $(cc-option,<flag>)
 # Return y if the compiler supports <flag>, n otherwise
-cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -S -x c /dev/null -o /dev/null)
+cc-option = $(success,mkdir .tmp_$$$$; trap "rm -rf .tmp_$$$$" EXIT; $(CC) -Werror $(CLANG_FLAGS) $(1) -c -x c /dev/null -o .tmp_$$$$/tmp.o)
 # $(ld-option,<flag>)
 # Return y if the linker supports <flag>, n otherwise
 ld-option = $(success,$(LD) -v $(1))
-# $(as-option,<flag>)
-# /dev/zero is used as output instead of /dev/null as some assembler cribs when
-# both input and output are same. Also both of them have same write behaviour so
-# can be easily substituted.
-as-option = $(success, $(CC) $(CLANG_FLAGS) $(1) -c -x assembler /dev/null -o /dev/zero)
-
 # $(as-instr,<instr>)
 # Return y if the assembler supports <instr>, n otherwise
 as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler -o /dev/null -)
diff --git a/scripts/atomic/gen-atomic-fallback.sh b/scripts/atomic/gen-atomic-fallback.sh
index 0fd1cf0c2b94..693dfa1de430 100755
--- a/scripts/atomic/gen-atomic-fallback.sh
+++ b/scripts/atomic/gen-atomic-fallback.sh
@@ -58,6 +58,21 @@ cat << EOF
 EOF
 }
+gen_proto_order_variant()
+{
+ local meta="$1"; shift
+ local pfx="$1"; shift
+ local name="$1"; shift
+ local sfx="$1"; shift
+ local order="$1"; shift
+ local arch="$1"
+ local atomic="$2"
+
+ local basename="${arch}${atomic}_${pfx}${name}${sfx}"
+
+ printf "#define arch_${basename}${order} ${basename}${order}\n"
+}
+
 #gen_proto_order_variants(meta, pfx, name, sfx, arch, atomic, int, args...)
 gen_proto_order_variants()
 {
@@ -72,6 +87,22 @@ gen_proto_order_variants()
 local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
+ if [ -z "$arch" ]; then
+ gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
+
+ if meta_has_acquire "${meta}"; then
+ gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
+ fi
+ if meta_has_release "${meta}"; then
+ gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
+ fi
+ if meta_has_relaxed "${meta}"; then
+ gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
+ fi
+
+ echo ""
+ fi
+
 # If we don't have relaxed atomics, then we don't bother with ordering fallbacks
 # read_acquire and set_release need to be templated, though
 if ! meta_has_relaxed "${meta}"; then
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 13e5fbafdf2f..66a6d511b524 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -27,7 +27,10 @@ parse_symbol() {
 elif [[ "${modcache[$module]+isset}" == "isset" ]]; then
 local objfile=${modcache[$module]}
 else
- [[ $modpath == "" ]] && return
+ if [[ $modpath == "" ]]; then
+ echo "WARNING!
Modules path isn't set, but is needed to parse this symbol" >&2 + return + fi local objfile=$(find "$modpath" -name "${module//_/[-_]}.ko*" -print -quit) [[ $objfile == "" ]] && return modcache[$module]=$objfile diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh index 955cf3aedf21..224f51012b6e 100755 --- a/scripts/headers_install.sh +++ b/scripts/headers_install.sh @@ -11,7 +11,7 @@ then echo "asm/inline/volatile keywords." echo echo "INFILE: header file to operate on" - echo "OUTFILE: output file which the processed header is writen to" + echo "OUTFILE: output file which the processed header is written to" exit 1 fi diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index 74eab03e31d4..f9b19524da11 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h @@ -29,6 +29,11 @@ #undef has_rel_mcount #undef tot_relsize #undef get_mcountsym +#undef find_symtab +#undef get_shnum +#undef set_shnum +#undef get_shstrndx +#undef get_symindex #undef get_sym_str_and_relp #undef do_func #undef Elf_Addr @@ -58,6 +63,11 @@ # define __has_rel_mcount __has64_rel_mcount # define has_rel_mcount has64_rel_mcount # define tot_relsize tot64_relsize +# define find_symtab find_symtab64 +# define get_shnum get_shnum64 +# define set_shnum set_shnum64 +# define get_shstrndx get_shstrndx64 +# define get_symindex get_symindex64 # define get_sym_str_and_relp get_sym_str_and_relp_64 # define do_func do64 # define get_mcountsym get_mcountsym_64 @@ -91,6 +101,11 @@ # define __has_rel_mcount __has32_rel_mcount # define has_rel_mcount has32_rel_mcount # define tot_relsize tot32_relsize +# define find_symtab find_symtab32 +# define get_shnum get_shnum32 +# define set_shnum set_shnum32 +# define get_shstrndx get_shstrndx32 +# define get_symindex get_symindex32 # define get_sym_str_and_relp get_sym_str_and_relp_32 # define do_func do32 # define get_mcountsym get_mcountsym_32 @@ -173,6 +188,67 @@ static int MIPS_is_fake_mcount(Elf_Rel const *rp) return is_fake; } +static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab, + Elf32_Word const *symtab_shndx) +{ + unsigned long offset; + int index; + + if (sym->st_shndx != SHN_XINDEX) + return w2(sym->st_shndx); + + offset = (unsigned long)sym - (unsigned long)symtab; + index = offset / sizeof(*sym); + + return w(symtab_shndx[index]); +} + +static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0) +{ + if (shdr0 && !ehdr->e_shnum) + return w(shdr0->sh_size); + + return w2(ehdr->e_shnum); +} + +static void set_shnum(Elf_Ehdr *ehdr, Elf_Shdr *shdr0, unsigned int new_shnum) +{ + if (new_shnum >= SHN_LORESERVE) { + ehdr->e_shnum = 0; + shdr0->sh_size = w(new_shnum); + } else + ehdr->e_shnum = w2(new_shnum); +} + +static int get_shstrndx(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0) +{ + if (ehdr->e_shstrndx != SHN_XINDEX) + return w2(ehdr->e_shstrndx); + + return w(shdr0->sh_link); +} + +static void find_symtab(Elf_Ehdr *const ehdr, Elf_Shdr const *shdr0, + unsigned const nhdr, Elf32_Word **symtab, + Elf32_Word **symtab_shndx) +{ + Elf_Shdr const *relhdr; + unsigned k; + + *symtab = NULL; + *symtab_shndx = NULL; + + for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { + if (relhdr->sh_type == SHT_SYMTAB) + *symtab = (void *)ehdr + relhdr->sh_offset; + else if (relhdr->sh_type == SHT_SYMTAB_SHNDX) + *symtab_shndx = (void *)ehdr + relhdr->sh_offset; + + if (*symtab && *symtab_shndx) + break; + } +} + /* Append the new shstrtab, Elf_Shdr[], __mcount_loc and its relocations. 
*/ static int append_func(Elf_Ehdr *const ehdr, Elf_Shdr *const shstr, @@ -188,10 +264,12 @@ static int append_func(Elf_Ehdr *const ehdr, char const *mc_name = (sizeof(Elf_Rela) == rel_entsize) ? ".rela__mcount_loc" : ".rel__mcount_loc"; - unsigned const old_shnum = w2(ehdr->e_shnum); uint_t const old_shoff = _w(ehdr->e_shoff); uint_t const old_shstr_sh_size = _w(shstr->sh_size); uint_t const old_shstr_sh_offset = _w(shstr->sh_offset); + Elf_Shdr *const shdr0 = (Elf_Shdr *)(old_shoff + (void *)ehdr); + unsigned int const old_shnum = get_shnum(ehdr, shdr0); + unsigned int const new_shnum = 2 + old_shnum; /* {.rel,}__mcount_loc */ uint_t t = 1 + strlen(mc_name) + _w(shstr->sh_size); uint_t new_e_shoff; @@ -201,6 +279,8 @@ static int append_func(Elf_Ehdr *const ehdr, t += (_align & -t); /* word-byte align */ new_e_shoff = t; + set_shnum(ehdr, shdr0, new_shnum); + /* body for new shstrtab */ if (ulseek(sb.st_size, SEEK_SET) < 0) return -1; @@ -255,7 +335,6 @@ static int append_func(Elf_Ehdr *const ehdr, return -1; ehdr->e_shoff = _w(new_e_shoff); - ehdr->e_shnum = w2(2 + w2(ehdr->e_shnum)); /* {.rel,}__mcount_loc */ if (ulseek(0, SEEK_SET) < 0) return -1; if (uwrite(ehdr, sizeof(*ehdr)) < 0) @@ -434,6 +513,8 @@ static int find_secsym_ndx(unsigned const txtndx, uint_t *const recvalp, unsigned int *sym_index, Elf_Shdr const *const symhdr, + Elf32_Word const *symtab, + Elf32_Word const *symtab_shndx, Elf_Ehdr const *const ehdr) { Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symhdr->sh_offset) @@ -445,7 +526,7 @@ static int find_secsym_ndx(unsigned const txtndx, for (symp = sym0, t = nsym; t; --t, ++symp) { unsigned int const st_bind = ELF_ST_BIND(symp->st_info); - if (txtndx == w2(symp->st_shndx) + if (txtndx == get_symindex(symp, symtab, symtab_shndx) /* avoid STB_WEAK */ && (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) { /* function symbols on ARM have quirks, avoid them */ @@ -516,21 +597,23 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0, return totrelsz; } - /* Overall supervision for Elf32 ET_REL file. */ static int do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) { Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) + (void *)ehdr); - unsigned const nhdr = w2(ehdr->e_shnum); - Elf_Shdr *const shstr = &shdr0[w2(ehdr->e_shstrndx)]; + unsigned const nhdr = get_shnum(ehdr, shdr0); + Elf_Shdr *const shstr = &shdr0[get_shstrndx(ehdr, shdr0)]; char const *const shstrtab = (char const *)(_w(shstr->sh_offset) + (void *)ehdr); Elf_Shdr const *relhdr; unsigned k; + Elf32_Word *symtab; + Elf32_Word *symtab_shndx; + /* Upper bound on space: assume all relevant relocs are for mcount. 
*/ unsigned totrelsz; @@ -561,6 +644,8 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname, return -1; } + find_symtab(ehdr, shdr0, nhdr, &symtab, &symtab_shndx); + for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { char const *const txtname = has_rel_mcount(relhdr, shdr0, shstrtab, fname); @@ -577,6 +662,7 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname, result = find_secsym_ndx(w(relhdr->sh_info), txtname, &recval, &recsym, &shdr0[symsec_sh_link], + symtab, symtab_shndx, ehdr); if (result) goto out; diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h index 298b73794d8b..16c1894c29bb 100644 --- a/security/integrity/integrity.h +++ b/security/integrity/integrity.h @@ -107,7 +107,7 @@ struct ima_digest_data { } ng; u8 data[2]; } xattr; - u8 digest[0]; + u8 digest[]; } __packed; /* @@ -119,7 +119,7 @@ struct signature_v2_hdr { uint8_t hash_algo; /* Digest algorithm [enum hash_algo] */ __be32 keyid; /* IMA key identifier - not X509/PGP specific */ __be16 sig_size; /* signature size */ - uint8_t sig[0]; /* signature payload */ + uint8_t sig[]; /* signature payload */ } __packed; /* integrity data associated with an inode */ diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c index da94a1b4bfda..0cc7cdd58465 100644 --- a/security/selinux/ss/conditional.c +++ b/security/selinux/ss/conditional.c @@ -27,6 +27,9 @@ static int cond_evaluate_expr(struct policydb *p, struct cond_expr *expr) int s[COND_EXPR_MAXDEPTH]; int sp = -1; + if (expr->len == 0) + return -1; + for (i = 0; i < expr->len; i++) { struct cond_expr_node *node = &expr->nodes[i]; @@ -392,27 +395,19 @@ static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp) rc = next_entry(buf, fp, sizeof(u32) * 2); if (rc) - goto err; + return rc; expr->expr_type = le32_to_cpu(buf[0]); expr->bool = le32_to_cpu(buf[1]); - if (!expr_node_isvalid(p, expr)) { - rc = -EINVAL; - goto err; - } + if (!expr_node_isvalid(p, expr)) + return -EINVAL; } rc = cond_read_av_list(p, fp, &node->true_list, NULL); if (rc) - goto err; - rc = cond_read_av_list(p, fp, &node->false_list, &node->true_list); - if (rc) - goto err; - return 0; -err: - cond_node_destroy(node); - return rc; + return rc; + return cond_read_av_list(p, fp, &node->false_list, &node->true_list); } int cond_read_list(struct policydb *p, void *fp) diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 313919bd42f8..ef0afd878bfc 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -2888,8 +2888,12 @@ int security_get_bools(struct selinux_state *state, if (*names) { for (i = 0; i < *len; i++) kfree((*names)[i]); + kfree(*names); } kfree(*values); + *len = 0; + *names = NULL; + *values = NULL; goto out; } diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c index 20b8f6cb3ff8..99aec7349167 100644 --- a/sound/hda/intel-dsp-config.c +++ b/sound/hda/intel-dsp-config.c @@ -208,8 +208,8 @@ static const struct config_entry config_table[] = { }, #endif +#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE) /* Cometlake-LP */ -#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP) { .flags = FLAG_SOF, .device = 0x02c8, @@ -240,9 +240,7 @@ static const struct config_entry config_table[] = { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = 0x02c8, }, -#endif /* Cometlake-H */ -#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H) { .flags = FLAG_SOF, .device = 0x06c8, diff --git a/sound/pci/hda/hda_intel.c 
b/sound/pci/hda/hda_intel.c index d20aedd103c6..3565e2ab0965 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2470,6 +2470,9 @@ static const struct pci_device_id azx_ids[] = { /* Icelake */ { PCI_DEVICE(0x8086, 0x34c8), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* Icelake-H */ + { PCI_DEVICE(0x8086, 0x3dc8), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Jasperlake */ { PCI_DEVICE(0x8086, 0x38c8), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, @@ -2478,9 +2481,14 @@ static const struct pci_device_id azx_ids[] = { /* Tigerlake */ { PCI_DEVICE(0x8086, 0xa0c8), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* Tigerlake-H */ + { PCI_DEVICE(0x8086, 0x43c8), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Elkhart Lake */ { PCI_DEVICE(0x8086, 0x4b55), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + { PCI_DEVICE(0x8086, 0x4b58), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Broxton-P(Apollolake) */ { PCI_DEVICE(0x8086, 0x5a98), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON }, diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index fbd7cc6026d8..e2b21ef5d7d1 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -4145,6 +4145,11 @@ HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 6d73f8beadb6..737ef82a75fd 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2461,6 +2461,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), @@ -7470,6 +7471,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), + SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED), diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c 
b/sound/soc/amd/raven/acp3x-pcm-dma.c index d8f554f369a8..e6386de20ac7 100644 --- a/sound/soc/amd/raven/acp3x-pcm-dma.c +++ b/sound/soc/amd/raven/acp3x-pcm-dma.c @@ -342,11 +342,34 @@ static int acp3x_dma_close(struct snd_soc_component *component, { struct snd_soc_pcm_runtime *prtd; struct i2s_dev_data *adata; + struct i2s_stream_instance *ins; prtd = substream->private_data; component = snd_soc_rtdcom_lookup(prtd, DRV_NAME); adata = dev_get_drvdata(component->dev); + ins = substream->runtime->private_data; + if (!ins) + return -EINVAL; + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + switch (ins->i2s_instance) { + case I2S_BT_INSTANCE: + adata->play_stream = NULL; + break; + case I2S_SP_INSTANCE: + default: + adata->i2ssp_play_stream = NULL; + } + } else { + switch (ins->i2s_instance) { + case I2S_BT_INSTANCE: + adata->capture_stream = NULL; + break; + case I2S_SP_INSTANCE: + default: + adata->i2ssp_capture_stream = NULL; + } + } /* Disable ACP irq, when the current stream is being closed and * another stream is also not active. @@ -354,13 +377,6 @@ static int acp3x_dma_close(struct snd_soc_component *component, if (!adata->play_stream && !adata->capture_stream && !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream) rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB); - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { - adata->play_stream = NULL; - adata->i2ssp_play_stream = NULL; - } else { - adata->capture_stream = NULL; - adata->i2ssp_capture_stream = NULL; - } return 0; } diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c index de003acb1951..473efe9ef998 100644 --- a/sound/soc/codecs/hdac_hda.c +++ b/sound/soc/codecs/hdac_hda.c @@ -441,13 +441,13 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component) ret = snd_hda_codec_set_name(hcodec, hcodec->preset->name); if (ret < 0) { dev_err(&hdev->dev, "name failed %s\n", hcodec->preset->name); - goto error; + goto error_pm; } ret = snd_hdac_regmap_init(&hcodec->core); if (ret < 0) { dev_err(&hdev->dev, "regmap init failed\n"); - goto error; + goto error_pm; } patch = (hda_codec_patch_t)hcodec->preset->driver_data; @@ -455,7 +455,7 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component) ret = patch(hcodec); if (ret < 0) { dev_err(&hdev->dev, "patch failed %d\n", ret); - goto error; + goto error_regmap; } } else { dev_dbg(&hdev->dev, "no patch file found\n"); @@ -467,7 +467,7 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component) ret = snd_hda_codec_parse_pcms(hcodec); if (ret < 0) { dev_err(&hdev->dev, "unable to map pcms to dai %d\n", ret); - goto error; + goto error_regmap; } /* HDMI controls need to be created in machine drivers */ @@ -476,7 +476,7 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component) if (ret < 0) { dev_err(&hdev->dev, "unable to create controls %d\n", ret); - goto error; + goto error_regmap; } } @@ -496,7 +496,9 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component) return 0; -error: +error_regmap: + snd_hdac_regmap_exit(hdev); +error_pm: pm_runtime_put(&hdev->dev); error_no_pm: snd_hdac_ext_bus_link_put(hdev->bus, hlink); @@ -518,6 +520,8 @@ static void hdac_hda_codec_remove(struct snd_soc_component *component) pm_runtime_disable(&hdev->dev); snd_hdac_ext_bus_link_put(hdev->bus, hlink); + + snd_hdac_regmap_exit(hdev); } static const struct snd_soc_dapm_route hdac_hda_dapm_routes[] = { diff --git a/sound/soc/codecs/max98390.c b/sound/soc/codecs/max98390.c index 0d63ebfbff2f..e6613b52bd78 100644 
--- a/sound/soc/codecs/max98390.c +++ b/sound/soc/codecs/max98390.c @@ -700,8 +700,8 @@ static bool max98390_readable_register(struct device *dev, unsigned int reg) case MAX98390_IRQ_CTRL ... MAX98390_WDOG_CTRL: case MAX98390_MEAS_ADC_THERM_WARN_THRESH ... MAX98390_BROWNOUT_INFINITE_HOLD: - case MAX98390_BROWNOUT_LVL_HOLD ... THERMAL_COILTEMP_RD_BACK_BYTE0: - case DSMIG_DEBUZZER_THRESHOLD ... MAX98390_R24FF_REV_ID: + case MAX98390_BROWNOUT_LVL_HOLD ... DSMIG_DEBUZZER_THRESHOLD: + case DSM_VOL_ENA ... MAX98390_R24FF_REV_ID: return true; default: return false; @@ -717,7 +717,7 @@ static bool max98390_volatile_reg(struct device *dev, unsigned int reg) case MAX98390_BROWNOUT_LOWEST_STATUS: case MAX98390_ENV_TRACK_BOOST_VOUT_READ: case DSM_STBASS_HPF_B0_BYTE0 ... DSM_DEBUZZER_ATTACK_TIME_BYTE2: - case THERMAL_RDC_RD_BACK_BYTE1 ... THERMAL_COILTEMP_RD_BACK_BYTE0: + case THERMAL_RDC_RD_BACK_BYTE1 ... DSMIG_DEBUZZER_THRESHOLD: case DSM_THERMAL_GAIN ... DSM_WBDRC_GAIN: return true; default: diff --git a/sound/soc/codecs/rt1015.c b/sound/soc/codecs/rt1015.c index 67e2e944d21b..2cccb310fa96 100644 --- a/sound/soc/codecs/rt1015.c +++ b/sound/soc/codecs/rt1015.c @@ -34,30 +34,32 @@ static const struct reg_default rt1015_reg[] = { { 0x0000, 0x0000 }, { 0x0004, 0xa000 }, { 0x0006, 0x0003 }, - { 0x000a, 0x0802 }, - { 0x000c, 0x0020 }, + { 0x000a, 0x081e }, + { 0x000c, 0x0006 }, { 0x000e, 0x0000 }, { 0x0010, 0x0000 }, { 0x0012, 0x0000 }, + { 0x0014, 0x0000 }, + { 0x0016, 0x0000 }, + { 0x0018, 0x0000 }, { 0x0020, 0x8000 }, - { 0x0022, 0x471b }, - { 0x006a, 0x0000 }, - { 0x006c, 0x4020 }, + { 0x0022, 0x8043 }, { 0x0076, 0x0000 }, { 0x0078, 0x0000 }, - { 0x007a, 0x0000 }, + { 0x007a, 0x0002 }, { 0x007c, 0x10ec }, { 0x007d, 0x1015 }, { 0x00f0, 0x5000 }, - { 0x00f2, 0x0774 }, - { 0x00f3, 0x8400 }, + { 0x00f2, 0x004c }, + { 0x00f3, 0xecfe }, { 0x00f4, 0x0000 }, + { 0x00f6, 0x0400 }, { 0x0100, 0x0028 }, { 0x0102, 0xff02 }, - { 0x0104, 0x8232 }, + { 0x0104, 0xa213 }, { 0x0106, 0x200c }, - { 0x010c, 0x002f }, - { 0x010e, 0xc000 }, + { 0x010c, 0x0000 }, + { 0x010e, 0x0058 }, { 0x0111, 0x0200 }, { 0x0112, 0x0400 }, { 0x0114, 0x0022 }, @@ -65,38 +67,46 @@ static const struct reg_default rt1015_reg[] = { { 0x0118, 0x0000 }, { 0x011a, 0x0123 }, { 0x011c, 0x4567 }, - { 0x0300, 0xdddd }, - { 0x0302, 0x0000 }, - { 0x0311, 0x9330 }, - { 0x0313, 0x0000 }, - { 0x0314, 0x0000 }, + { 0x0300, 0x203d }, + { 0x0302, 0x001e }, + { 0x0311, 0x0000 }, + { 0x0313, 0x6014 }, + { 0x0314, 0x00a2 }, { 0x031a, 0x00a0 }, { 0x031c, 0x001f }, { 0x031d, 0xffff }, { 0x031e, 0x0000 }, { 0x031f, 0x0000 }, + { 0x0320, 0x0000 }, { 0x0321, 0x0000 }, - { 0x0322, 0x0000 }, - { 0x0328, 0x0000 }, - { 0x0329, 0x0000 }, - { 0x032a, 0x0000 }, - { 0x032b, 0x0000 }, - { 0x032c, 0x0000 }, - { 0x032d, 0x0000 }, - { 0x032e, 0x030e }, - { 0x0330, 0x0080 }, + { 0x0322, 0xd7df }, + { 0x0328, 0x10b2 }, + { 0x0329, 0x0175 }, + { 0x032a, 0x36ad }, + { 0x032b, 0x7e55 }, + { 0x032c, 0x0520 }, + { 0x032d, 0xaa00 }, + { 0x032e, 0x570e }, + { 0x0330, 0xe180 }, { 0x0332, 0x0034 }, - { 0x0334, 0x0000 }, - { 0x0336, 0x0000 }, + { 0x0334, 0x0001 }, + { 0x0336, 0x0010 }, + { 0x0338, 0x0000 }, + { 0x04fa, 0x0030 }, + { 0x04fc, 0x35c8 }, + { 0x04fe, 0x0800 }, + { 0x0500, 0x0400 }, + { 0x0502, 0x1000 }, + { 0x0504, 0x0000 }, { 0x0506, 0x04ff }, - { 0x0508, 0x0030 }, - { 0x050a, 0x0018 }, - { 0x0519, 0x307f }, - { 0x051a, 0xffff }, - { 0x051b, 0x4000 }, + { 0x0508, 0x0010 }, + { 0x050a, 0x001a }, + { 0x0519, 0x1c68 }, + { 0x051a, 0x0ccc }, + { 0x051b, 0x0666 }, { 0x051d, 0x0000 }, 
{ 0x051f, 0x0000 }, - { 0x0536, 0x1000 }, + { 0x0536, 0x061c }, { 0x0538, 0x0000 }, { 0x053a, 0x0000 }, { 0x053c, 0x0000 }, @@ -110,19 +120,18 @@ static const struct reg_default rt1015_reg[] = { { 0x0544, 0x0000 }, { 0x0568, 0x0000 }, { 0x056a, 0x0000 }, - { 0x1000, 0x0000 }, - { 0x1002, 0x6505 }, + { 0x1000, 0x0040 }, + { 0x1002, 0x5405 }, { 0x1006, 0x5515 }, - { 0x1007, 0x003f }, - { 0x1009, 0x770f }, - { 0x100a, 0x01ff }, - { 0x100c, 0x0000 }, + { 0x1007, 0x05f7 }, + { 0x1009, 0x0b0a }, + { 0x100a, 0x00ef }, { 0x100d, 0x0003 }, { 0x1010, 0xa433 }, { 0x1020, 0x0000 }, - { 0x1200, 0x3d02 }, - { 0x1202, 0x0813 }, - { 0x1204, 0x0211 }, + { 0x1200, 0x5a01 }, + { 0x1202, 0x6524 }, + { 0x1204, 0x1f00 }, { 0x1206, 0x0000 }, { 0x1208, 0x0000 }, { 0x120a, 0x0000 }, @@ -130,16 +139,16 @@ static const struct reg_default rt1015_reg[] = { { 0x120e, 0x0000 }, { 0x1210, 0x0000 }, { 0x1212, 0x0000 }, - { 0x1300, 0x0701 }, - { 0x1302, 0x12f9 }, - { 0x1304, 0x3405 }, + { 0x1300, 0x10a1 }, + { 0x1302, 0x12ff }, + { 0x1304, 0x0400 }, { 0x1305, 0x0844 }, - { 0x1306, 0x1611 }, + { 0x1306, 0x4611 }, { 0x1308, 0x555e }, { 0x130a, 0x0000 }, - { 0x130c, 0x2400}, - { 0x130e, 0x7700 }, - { 0x130f, 0x0000 }, + { 0x130c, 0x2000 }, + { 0x130e, 0x0100 }, + { 0x130f, 0x0001 }, { 0x1310, 0x0000 }, { 0x1312, 0x0000 }, { 0x1314, 0x0000 }, @@ -209,6 +218,9 @@ static bool rt1015_volatile_register(struct device *dev, unsigned int reg) case RT1015_DC_CALIB_CLSD7: case RT1015_DC_CALIB_CLSD8: case RT1015_S_BST_TIMING_INTER1: + case RT1015_OSCK_STA: + case RT1015_MONO_DYNA_CTRL1: + case RT1015_MONO_DYNA_CTRL5: return true; default: @@ -224,6 +236,12 @@ static bool rt1015_readable_register(struct device *dev, unsigned int reg) case RT1015_CLK3: case RT1015_PLL1: case RT1015_PLL2: + case RT1015_DUM_RW1: + case RT1015_DUM_RW2: + case RT1015_DUM_RW3: + case RT1015_DUM_RW4: + case RT1015_DUM_RW5: + case RT1015_DUM_RW6: case RT1015_CLK_DET: case RT1015_SIL_DET: case RT1015_CUSTOMER_ID: @@ -235,6 +253,7 @@ static bool rt1015_readable_register(struct device *dev, unsigned int reg) case RT1015_PAD_DRV2: case RT1015_GAT_BOOST: case RT1015_PRO_ALT: + case RT1015_OSCK_STA: case RT1015_MAN_I2C: case RT1015_DAC1: case RT1015_DAC2: @@ -272,6 +291,13 @@ static bool rt1015_readable_register(struct device *dev, unsigned int reg) case RT1015_SMART_BST_CTRL2: case RT1015_ANA_CTRL1: case RT1015_ANA_CTRL2: + case RT1015_PWR_STATE_CTRL: + case RT1015_MONO_DYNA_CTRL: + case RT1015_MONO_DYNA_CTRL1: + case RT1015_MONO_DYNA_CTRL2: + case RT1015_MONO_DYNA_CTRL3: + case RT1015_MONO_DYNA_CTRL4: + case RT1015_MONO_DYNA_CTRL5: case RT1015_SPK_VOL: case RT1015_SHORT_DETTOP1: case RT1015_SHORT_DETTOP2: diff --git a/sound/soc/codecs/rt1015.h b/sound/soc/codecs/rt1015.h index 6fbe802082c4..8169962935a5 100644 --- a/sound/soc/codecs/rt1015.h +++ b/sound/soc/codecs/rt1015.h @@ -21,6 +21,12 @@ #define RT1015_CLK3 0x0006 #define RT1015_PLL1 0x000a #define RT1015_PLL2 0x000c +#define RT1015_DUM_RW1 0x000e +#define RT1015_DUM_RW2 0x0010 +#define RT1015_DUM_RW3 0x0012 +#define RT1015_DUM_RW4 0x0014 +#define RT1015_DUM_RW5 0x0016 +#define RT1015_DUM_RW6 0x0018 #define RT1015_CLK_DET 0x0020 #define RT1015_SIL_DET 0x0022 #define RT1015_CUSTOMER_ID 0x0076 @@ -32,6 +38,7 @@ #define RT1015_PAD_DRV2 0x00f2 #define RT1015_GAT_BOOST 0x00f3 #define RT1015_PRO_ALT 0x00f4 +#define RT1015_OSCK_STA 0x00f6 #define RT1015_MAN_I2C 0x0100 #define RT1015_DAC1 0x0102 #define RT1015_DAC2 0x0104 @@ -70,7 +77,13 @@ #define RT1015_ANA_CTRL1 0x0334 #define RT1015_ANA_CTRL2 0x0336 #define 
RT1015_PWR_STATE_CTRL 0x0338 -#define RT1015_SPK_VOL 0x0506 +#define RT1015_MONO_DYNA_CTRL 0x04fa +#define RT1015_MONO_DYNA_CTRL1 0x04fc +#define RT1015_MONO_DYNA_CTRL2 0x04fe +#define RT1015_MONO_DYNA_CTRL3 0x0500 +#define RT1015_MONO_DYNA_CTRL4 0x0502 +#define RT1015_MONO_DYNA_CTRL5 0x0504 +#define RT1015_SPK_VOL 0x0506 #define RT1015_SHORT_DETTOP1 0x0508 #define RT1015_SHORT_DETTOP2 0x050a #define RT1015_SPK_DC_DETECT1 0x0519 diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index d3245123101d..3e9d2c6c51f9 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -2829,12 +2829,13 @@ static int rt5682_probe(struct snd_soc_component *component) return ret; } rt5682->mclk = NULL; - } else { - /* Register CCF DAI clock control */ - ret = rt5682_register_dai_clks(component); - if (ret) - return ret; } + + /* Register CCF DAI clock control */ + ret = rt5682_register_dai_clks(component); + if (ret) + return ret; + /* Initial setup for CCF */ rt5682->lrck[RT5682_AIF1] = CLK_48; #endif diff --git a/sound/soc/fsl/fsl_asrc_common.h b/sound/soc/fsl/fsl_asrc_common.h index 77665b15c8db..7e1c13ca37f1 100644 --- a/sound/soc/fsl/fsl_asrc_common.h +++ b/sound/soc/fsl/fsl_asrc_common.h @@ -32,6 +32,7 @@ enum asrc_pair_index { * @dma_chan: inputer and output DMA channels * @dma_data: private dma data * @pos: hardware pointer position + * @req_dma_chan: flag to release dev_to_dev chan * @private: pair private area */ struct fsl_asrc_pair { @@ -45,6 +46,7 @@ struct fsl_asrc_pair { struct dma_chan *dma_chan[2]; struct imx_dma_data dma_data; unsigned int pos; + bool req_dma_chan; void *private; }; diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c index d6a3fc5f87e5..5f01a58f422a 100644 --- a/sound/soc/fsl/fsl_asrc_dma.c +++ b/sound/soc/fsl/fsl_asrc_dma.c @@ -135,6 +135,8 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component, struct snd_dmaengine_dai_dma_data *dma_params_be = NULL; struct snd_pcm_runtime *runtime = substream->runtime; struct fsl_asrc_pair *pair = runtime->private_data; + struct dma_chan *tmp_chan = NULL, *be_chan = NULL; + struct snd_soc_component *component_be = NULL; struct fsl_asrc *asrc = pair->asrc; struct dma_slave_config config_fe, config_be; enum asrc_pair_index index = pair->index; @@ -142,7 +144,6 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component, int stream = substream->stream; struct imx_dma_data *tmp_data; struct snd_soc_dpcm *dpcm; - struct dma_chan *tmp_chan; struct device *dev_be; u8 dir = tx ? OUT : IN; dma_cap_mask_t mask; @@ -197,18 +198,30 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component, dma_cap_set(DMA_SLAVE, mask); dma_cap_set(DMA_CYCLIC, mask); + /* + * The Back-End device might have already requested a DMA channel, + * so try to reuse it first, and then request a new one upon NULL. + */ + component_be = snd_soc_lookup_component_nolocked(dev_be, SND_DMAENGINE_PCM_DRV_NAME); + if (component_be) { + be_chan = soc_component_to_pcm(component_be)->chan[substream->stream]; + tmp_chan = be_chan; + } + if (!tmp_chan) + tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx"); + /* * An EDMA DEV_TO_DEV channel is fixed and bound with DMA event of each * peripheral, unlike SDMA channel that is allocated dynamically. 
So no - * need to configure dma_request and dma_request2, but get dma_chan via - * dma_request_slave_channel directly with dma name of Front-End device + * need to configure dma_request and dma_request2, but get dma_chan of + * Back-End device directly via dma_request_slave_channel. */ if (!asrc->use_edma) { /* Get DMA request of Back-End */ - tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx"); tmp_data = tmp_chan->private; pair->dma_data.dma_request = tmp_data->dma_request; - dma_release_channel(tmp_chan); + if (!be_chan) + dma_release_channel(tmp_chan); /* Get DMA request of Front-End */ tmp_chan = asrc->get_dma_channel(pair, dir); @@ -220,9 +233,11 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component, pair->dma_chan[dir] = dma_request_channel(mask, filter, &pair->dma_data); + pair->req_dma_chan = true; } else { - pair->dma_chan[dir] = - asrc->get_dma_channel(pair, dir); + pair->dma_chan[dir] = tmp_chan; + /* Do not flag to release if we are reusing the Back-End one */ + pair->req_dma_chan = !be_chan; } if (!pair->dma_chan[dir]) { @@ -261,7 +276,8 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component, ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be); if (ret) { dev_err(dev, "failed to config DMA channel for Back-End\n"); - dma_release_channel(pair->dma_chan[dir]); + if (pair->req_dma_chan) + dma_release_channel(pair->dma_chan[dir]); return ret; } @@ -273,19 +289,22 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component, static int fsl_asrc_dma_hw_free(struct snd_soc_component *component, struct snd_pcm_substream *substream) { + bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; struct snd_pcm_runtime *runtime = substream->runtime; struct fsl_asrc_pair *pair = runtime->private_data; + u8 dir = tx ? OUT : IN; snd_pcm_set_runtime_buffer(substream, NULL); - if (pair->dma_chan[IN]) - dma_release_channel(pair->dma_chan[IN]); + if (pair->dma_chan[!dir]) + dma_release_channel(pair->dma_chan[!dir]); - if (pair->dma_chan[OUT]) - dma_release_channel(pair->dma_chan[OUT]); + /* release dev_to_dev chan if we aren't reusing the Back-End one */ + if (pair->dma_chan[dir] && pair->req_dma_chan) + dma_release_channel(pair->dma_chan[dir]); - pair->dma_chan[IN] = NULL; - pair->dma_chan[OUT] = NULL; + pair->dma_chan[!dir] = NULL; + pair->dma_chan[dir] = NULL; return 0; } diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index bad89b0d129e..1a2fa7f18142 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -678,8 +678,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream, struct regmap *regs = ssi->regs; u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i; unsigned long clkrate, baudrate, tmprate; - unsigned int slots = params_channels(hw_params); - unsigned int slot_width = 32; + unsigned int channels = params_channels(hw_params); + unsigned int slot_width = params_width(hw_params); + unsigned int slots = 2; u64 sub, savesub = 100000; unsigned int freq; bool baudclk_is_used; @@ -688,10 +689,14 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream, /* Override slots and slot_width if being specifically set... 
*/ if (ssi->slots) slots = ssi->slots; - /* ...but keep 32 bits if slots is 2 -- I2S Master mode */ - if (ssi->slot_width && slots != 2) + if (ssi->slot_width) slot_width = ssi->slot_width; + /* ...but force 32 bits for stereo audio using I2S Master Mode */ + if (channels == 2 && + (ssi->i2s_net & SSI_SCR_I2S_MODE_MASK) == SSI_SCR_I2S_MODE_MASTER) + slot_width = 32; + /* Generate bit clock based on the slot number and slot width */ freq = slots * slot_width * params_rate(hw_params); diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig index a2a5798c9139..5dc489a79454 100644 --- a/sound/soc/intel/boards/Kconfig +++ b/sound/soc/intel/boards/Kconfig @@ -492,7 +492,7 @@ config SND_SOC_INTEL_SOF_PCM512x_MACH endif ## SND_SOC_SOF_HDA_LINK || SND_SOC_SOF_BAYTRAIL -if (SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK) +if (SND_SOC_SOF_COMETLAKE && SND_SOC_SOF_HDA_LINK) config SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH tristate "CML_LP with DA7219 and MAX98357A in I2S Mode" @@ -520,7 +520,7 @@ config SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH Say Y if you have such a device. If unsure select "N". -endif ## SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK +endif ## SND_SOC_SOF_COMETLAKE && SND_SOC_SOF_HDA_LINK if SND_SOC_SOF_JASPERLAKE diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c index 6c20bdd850f3..8ada4ecba847 100644 --- a/sound/soc/qcom/common.c +++ b/sound/soc/qcom/common.c @@ -4,6 +4,7 @@ #include #include "common.h" +#include "qdsp6/q6afe.h" int qcom_snd_parse_of(struct snd_soc_card *card) { @@ -101,6 +102,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card) } link->no_pcm = 1; link->ignore_pmdown_time = 1; + + if (q6afe_is_rx_port(link->id)) { + link->dpcm_playback = 1; + link->dpcm_capture = 0; + } else { + link->dpcm_playback = 0; + link->dpcm_capture = 1; + } + } else { dlc = devm_kzalloc(dev, sizeof(*dlc), GFP_KERNEL); if (!dlc) @@ -113,12 +123,12 @@ int qcom_snd_parse_of(struct snd_soc_card *card) link->codecs->dai_name = "snd-soc-dummy-dai"; link->codecs->name = "snd-soc-dummy"; link->dynamic = 1; + link->dpcm_playback = 1; + link->dpcm_capture = 1; } link->ignore_suspend = 1; link->nonatomic = 1; - link->dpcm_playback = 1; - link->dpcm_capture = 1; link->stream_name = link->name; link++; diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c index e0945f7a58c8..0ce4eb60f984 100644 --- a/sound/soc/qcom/qdsp6/q6afe.c +++ b/sound/soc/qcom/qdsp6/q6afe.c @@ -800,6 +800,14 @@ int q6afe_get_port_id(int index) } EXPORT_SYMBOL_GPL(q6afe_get_port_id); +int q6afe_is_rx_port(int index) +{ + if (index < 0 || index >= AFE_PORT_MAX) + return -EINVAL; + + return port_maps[index].is_rx; +} +EXPORT_SYMBOL_GPL(q6afe_is_rx_port); static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt, struct q6afe_port *port) { diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h index c7ed5422baff..1a0f80a14afe 100644 --- a/sound/soc/qcom/qdsp6/q6afe.h +++ b/sound/soc/qcom/qdsp6/q6afe.h @@ -198,6 +198,7 @@ int q6afe_port_start(struct q6afe_port *port); int q6afe_port_stop(struct q6afe_port *port); void q6afe_port_put(struct q6afe_port *port); int q6afe_get_port_id(int index); +int q6afe_is_rx_port(int index); void q6afe_hdmi_port_prepare(struct q6afe_port *port, struct q6afe_hdmi_cfg *cfg); void q6afe_slim_port_prepare(struct q6afe_port *port, diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c index 0e0e8f7a460a..ae4b2cabdf2d 100644 --- a/sound/soc/qcom/qdsp6/q6asm.c +++ b/sound/soc/qcom/qdsp6/q6asm.c @@ 
-25,6 +25,7 @@ #define ASM_STREAM_CMD_FLUSH 0x00010BCE #define ASM_SESSION_CMD_PAUSE 0x00010BD3 #define ASM_DATA_CMD_EOS 0x00010BDB +#define ASM_DATA_EVENT_RENDERED_EOS 0x00010C1C #define ASM_NULL_POPP_TOPOLOGY 0x00010C68 #define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09 #define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10 @@ -622,9 +623,6 @@ static int32_t q6asm_stream_callback(struct apr_device *adev, case ASM_SESSION_CMD_SUSPEND: client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE; break; - case ASM_DATA_CMD_EOS: - client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE; - break; case ASM_STREAM_CMD_FLUSH: client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE; break; @@ -727,6 +725,9 @@ static int32_t q6asm_stream_callback(struct apr_device *adev, spin_unlock_irqrestore(&ac->lock, flags); } + break; + case ASM_DATA_EVENT_RENDERED_EOS: + client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE; break; } diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c index 7cd42fcfcf38..1707414cfa92 100644 --- a/sound/soc/rockchip/rockchip_pdm.c +++ b/sound/soc/rockchip/rockchip_pdm.c @@ -590,8 +590,10 @@ static int rockchip_pdm_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } ret = regcache_sync(pdm->regmap); diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 7b387202c5db..0f30f5aabaa8 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -310,7 +310,7 @@ struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd, } EXPORT_SYMBOL_GPL(snd_soc_rtdcom_lookup); -static struct snd_soc_component +struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev, const char *driver_name) { struct snd_soc_component *component; @@ -329,6 +329,7 @@ static struct snd_soc_component return found_component; } +EXPORT_SYMBOL_GPL(snd_soc_lookup_component_nolocked); struct snd_soc_component *snd_soc_lookup_component(struct device *dev, const char *driver_name) diff --git a/sound/soc/soc-devres.c b/sound/soc/soc-devres.c index a9ea172a66a7..11e5d7962370 100644 --- a/sound/soc/soc-devres.c +++ b/sound/soc/soc-devres.c @@ -9,6 +9,43 @@ #include #include +static void devm_dai_release(struct device *dev, void *res) +{ + snd_soc_unregister_dai(*(struct snd_soc_dai **)res); +} + +/** + * devm_snd_soc_register_dai - resource-managed dai registration + * @dev: Device used to manage component + * @component: The component the DAIs are registered for + * @dai_drv: DAI driver to use for the DAI + * @legacy_dai_naming: if %true, use legacy single-name format; + * if %false, use multiple-name format; + */ +struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev, + struct snd_soc_component *component, + struct snd_soc_dai_driver *dai_drv, + bool legacy_dai_naming) +{ + struct snd_soc_dai **ptr; + struct snd_soc_dai *dai; + + ptr = devres_alloc(devm_dai_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return NULL; + + dai = snd_soc_register_dai(component, dai_drv, legacy_dai_naming); + if (dai) { + *ptr = dai; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return dai; +} +EXPORT_SYMBOL_GPL(devm_snd_soc_register_dai); + static void devm_component_release(struct device *dev, void *res) { snd_soc_unregister_component(*(struct device **)res); diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index f728309a0833..80a4e71f2d95 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c @@ -21,18 
+21,6 @@ */ #define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(31) -struct dmaengine_pcm { - struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1]; - const struct snd_dmaengine_pcm_config *config; - struct snd_soc_component component; - unsigned int flags; -}; - -static struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p) -{ - return container_of(p, struct dmaengine_pcm, component); -} - static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm, struct snd_pcm_substream *substream) { diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 2c114b4542ce..c517064f5391 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -2630,15 +2630,15 @@ static int soc_dpcm_fe_runtime_update(struct snd_soc_pcm_runtime *fe, int new) int count, paths; int ret; + if (!fe->dai_link->dynamic) + return 0; + if (fe->num_cpus > 1) { dev_err(fe->dev, "%s doesn't support Multi CPU yet\n", __func__); return -EINVAL; } - if (!fe->dai_link->dynamic) - return 0; - /* only check active links */ if (!snd_soc_dai_active(asoc_rtd_to_cpu(fe, 0))) return 0; diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 9e89633676b7..43e5745b06aa 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -1851,7 +1851,7 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg, list_add(&dai_drv->dobj.list, &tplg->comp->dobj_list); /* register the DAI to the component */ - dai = snd_soc_register_dai(tplg->comp, dai_drv, false); + dai = devm_snd_soc_register_dai(tplg->comp->dev, tplg->comp, dai_drv, false); if (!dai) return -ENOMEM; @@ -1859,7 +1859,6 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg, ret = snd_soc_dapm_new_dai_widgets(dapm, dai); if (ret != 0) { dev_err(dai->dev, "Failed to create DAI widgets %d\n", ret); - snd_soc_unregister_dai(dai); return ret; } diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig index c9a2bee4b55c..3aaf25e4f766 100644 --- a/sound/soc/sof/intel/Kconfig +++ b/sound/soc/sof/intel/Kconfig @@ -25,8 +25,7 @@ config SND_SOC_SOF_INTEL_PCI select SND_SOC_SOF_CANNONLAKE if SND_SOC_SOF_CANNONLAKE_SUPPORT select SND_SOC_SOF_COFFEELAKE if SND_SOC_SOF_COFFEELAKE_SUPPORT select SND_SOC_SOF_ICELAKE if SND_SOC_SOF_ICELAKE_SUPPORT - select SND_SOC_SOF_COMETLAKE_LP if SND_SOC_SOF_COMETLAKE_LP_SUPPORT - select SND_SOC_SOF_COMETLAKE_H if SND_SOC_SOF_COMETLAKE_H_SUPPORT + select SND_SOC_SOF_COMETLAKE if SND_SOC_SOF_COMETLAKE_SUPPORT select SND_SOC_SOF_TIGERLAKE if SND_SOC_SOF_TIGERLAKE_SUPPORT select SND_SOC_SOF_ELKHARTLAKE if SND_SOC_SOF_ELKHARTLAKE_SUPPORT select SND_SOC_SOF_JASPERLAKE if SND_SOC_SOF_JASPERLAKE_SUPPORT @@ -201,34 +200,22 @@ config SND_SOC_SOF_ICELAKE This option is not user-selectable but automagically handled by 'select' statements at a higher level -config SND_SOC_SOF_COMETLAKE_LP +config SND_SOC_SOF_COMETLAKE tristate select SND_SOC_SOF_HDA_COMMON help This option is not user-selectable but automagically handled by 'select' statements at a higher level +config SND_SOC_SOF_COMETLAKE_SUPPORT + bool + config SND_SOC_SOF_COMETLAKE_LP_SUPPORT - bool "SOF support for CometLake-LP" + bool "SOF support for CometLake" + select SND_SOC_SOF_COMETLAKE_SUPPORT help This adds support for Sound Open Firmware for Intel(R) platforms - using the Cometlake-LP processors. - Say Y if you have such a device. - If unsure select "N". 
- -config SND_SOC_SOF_COMETLAKE_H - tristate - select SND_SOC_SOF_HDA_COMMON - help - This option is not user-selectable but automagically handled by - 'select' statements at a higher level - -config SND_SOC_SOF_COMETLAKE_H_SUPPORT - bool "SOF support for CometLake-H" - help - This adds support for Sound Open Firmware for Intel(R) platforms - using the Cometlake-H processors. - Say Y if you have such a device. + using the Cometlake processors. If unsure select "N". config SND_SOC_SOF_TIGERLAKE_SUPPORT diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c index 7f65dcc95811..1bda14c3590c 100644 --- a/sound/soc/sof/intel/hda-stream.c +++ b/sound/soc/sof/intel/hda-stream.c @@ -653,11 +653,16 @@ irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context) if (status & AZX_INT_CTRL_EN) { rirb_status = snd_hdac_chip_readb(bus, RIRBSTS); if (rirb_status & RIRB_INT_MASK) { + /* + * Clearing the interrupt status here ensures + * that no interrupt gets masked after the RIRB + * wp is read in snd_hdac_bus_update_rirb. + */ + snd_hdac_chip_writeb(bus, RIRBSTS, + RIRB_INT_MASK); active = true; if (rirb_status & RIRB_INT_RESPONSE) snd_hdac_bus_update_rirb(bus); - snd_hdac_chip_writeb(bus, RIRBSTS, - RIRB_INT_MASK); } } #endif diff --git a/sound/soc/sof/probe.h b/sound/soc/sof/probe.h index b04b728c7224..5e159ab239fa 100644 --- a/sound/soc/sof/probe.h +++ b/sound/soc/sof/probe.h @@ -36,7 +36,7 @@ struct sof_probe_point_desc { struct sof_ipc_probe_dma_add_params { struct sof_ipc_cmd_hdr hdr; unsigned int num_elems; - struct sof_probe_dma dma[0]; + struct sof_probe_dma dma[]; } __packed; struct sof_ipc_probe_info_params { @@ -51,19 +51,19 @@ struct sof_ipc_probe_info_params { struct sof_ipc_probe_dma_remove_params { struct sof_ipc_cmd_hdr hdr; unsigned int num_elems; - unsigned int stream_tag[0]; + unsigned int stream_tag[]; } __packed; struct sof_ipc_probe_point_add_params { struct sof_ipc_cmd_hdr hdr; unsigned int num_elems; - struct sof_probe_point_desc desc[0]; + struct sof_probe_point_desc desc[]; } __packed; struct sof_ipc_probe_point_remove_params { struct sof_ipc_cmd_hdr hdr; unsigned int num_elems; - unsigned int buffer_id[0]; + unsigned int buffer_id[]; } __packed; int sof_ipc_probe_init(struct snd_sof_dev *sdev, diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c index b13697dab7c0..aa3532ba1434 100644 --- a/sound/soc/sof/sof-pci-dev.c +++ b/sound/soc/sof/sof-pci-dev.c @@ -151,9 +151,7 @@ static const struct sof_dev_desc cfl_desc = { }; #endif -#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP) || \ - IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H) - +#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE) static const struct sof_dev_desc cml_desc = { .machines = snd_soc_acpi_intel_cml_machines, .alt_machines = snd_soc_acpi_intel_cml_sdw_machines, @@ -411,8 +409,11 @@ static const struct pci_device_id sof_pci_ids[] = { .driver_data = (unsigned long)&cfl_desc}, #endif #if IS_ENABLED(CONFIG_SND_SOC_SOF_ICELAKE) - { PCI_DEVICE(0x8086, 0x34C8), + { PCI_DEVICE(0x8086, 0x34C8), /* ICL-LP */ .driver_data = (unsigned long)&icl_desc}, + { PCI_DEVICE(0x8086, 0x3dc8), /* ICL-H */ + .driver_data = (unsigned long)&icl_desc}, + #endif #if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE) { PCI_DEVICE(0x8086, 0x38c8), @@ -420,17 +421,20 @@ static const struct pci_device_id sof_pci_ids[] = { { PCI_DEVICE(0x8086, 0x4dc8), .driver_data = (unsigned long)&jsl_desc}, #endif -#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP) - { PCI_DEVICE(0x8086, 0x02c8), +#if 
IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE) + { PCI_DEVICE(0x8086, 0x02c8), /* CML-LP */ .driver_data = (unsigned long)&cml_desc}, -#endif -#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H) - { PCI_DEVICE(0x8086, 0x06c8), + { PCI_DEVICE(0x8086, 0x06c8), /* CML-H */ + .driver_data = (unsigned long)&cml_desc}, + { PCI_DEVICE(0x8086, 0xa3f0), /* CML-S */ .driver_data = (unsigned long)&cml_desc}, #endif #if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE) - { PCI_DEVICE(0x8086, 0xa0c8), + { PCI_DEVICE(0x8086, 0xa0c8), /* TGL-LP */ .driver_data = (unsigned long)&tgl_desc}, + { PCI_DEVICE(0x8086, 0x43c8), /* TGL-H */ + .driver_data = (unsigned long)&tgl_desc}, + #endif #if IS_ENABLED(CONFIG_SND_SOC_SOF_ELKHARTLAKE) { PCI_DEVICE(0x8086, 0x4b55), diff --git a/sound/usb/format.c b/sound/usb/format.c index 5ffb457cc88c..1b28d01d1f4c 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -394,8 +394,9 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip, return nr_rates; } -/* Line6 Helix series don't support the UAC2_CS_RANGE usb function - * call. Return a static table of known clock rates. +/* Line6 Helix series and the Rode Rodecaster Pro don't support the + * UAC2_CS_RANGE usb function call. Return a static table of known + * clock rates. */ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip, struct audioformat *fp) @@ -408,6 +409,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip, case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */ case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */ case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */ + case USB_ID(0x19f7, 0x0011): /* Rode Rodecaster Pro */ return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000); } diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 15769f266790..eab0fd4fd7c3 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -581,8 +581,9 @@ static int check_matrix_bitmap(unsigned char *bmap, * if failed, give up and free the control instance. 
*/ -int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, - struct snd_kcontrol *kctl) +int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list, + struct snd_kcontrol *kctl, + bool is_std_info) { struct usb_mixer_interface *mixer = list->mixer; int err; @@ -596,6 +597,7 @@ int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, return err; } list->kctl = kctl; + list->is_std_info = is_std_info; list->next_id_elem = mixer->id_elems[list->id]; mixer->id_elems[list->id] = list; return 0; @@ -3234,8 +3236,11 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid) unitid = delegate_notify(mixer, unitid, NULL, NULL); for_each_mixer_elem(list, mixer, unitid) { - struct usb_mixer_elem_info *info = - mixer_elem_list_to_info(list); + struct usb_mixer_elem_info *info; + + if (!list->is_std_info) + continue; + info = mixer_elem_list_to_info(list); /* invalidate cache, so the value is read from the device */ info->cached = 0; snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, @@ -3315,6 +3320,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, if (!list->kctl) continue; + if (!list->is_std_info) + continue; info = mixer_elem_list_to_info(list); if (count > 1 && info->control != control) diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index 41ec9dc4139b..c29e27ac43a7 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h @@ -66,6 +66,7 @@ struct usb_mixer_elem_list { struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */ struct snd_kcontrol *kctl; unsigned int id; + bool is_std_info; usb_mixer_elem_dump_func_t dump; usb_mixer_elem_resume_func_t resume; }; @@ -103,8 +104,12 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid); int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int value_set); -int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, - struct snd_kcontrol *kctl); +int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list, + struct snd_kcontrol *kctl, + bool is_std_info); + +#define snd_usb_mixer_add_control(list, kctl) \ + snd_usb_mixer_add_list(list, kctl, true) void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list, struct usb_mixer_interface *mixer, diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index b6bcf2f92383..cec1cfd7edb7 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -158,7 +158,8 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer, return -ENOMEM; } kctl->private_free = snd_usb_mixer_elem_free; - return snd_usb_mixer_add_control(list, kctl); + /* don't use snd_usb_mixer_add_control() here, this is a special list element */ + return snd_usb_mixer_add_list(list, kctl, false); } /* diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 8a05dcb1344f..a777d36c4f5a 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -367,6 +367,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, ifnum = 0; goto add_sync_ep_from_ifnum; case USB_ID(0x07fd, 0x0008): /* MOTU M Series */ + case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */ ep = 0x81; ifnum = 2; goto add_sync_ep_from_ifnum; @@ -1786,6 +1787,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream return 0; case SNDRV_PCM_TRIGGER_STOP: stop_endpoints(subs); + subs->data_endpoint->retire_data_urb = NULL; subs->running = 0; return 0; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c 
index bca0179a0ef8..fca72730a802 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1532,6 +1532,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) static bool is_itf_usb_dsd_dac(unsigned int id) { switch (id) { + case USB_ID(0x154e, 0x1002): /* Denon DCD-1500RE */ case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */ case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */ case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */ @@ -1673,6 +1674,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, chip->usb_id == USB_ID(0x0951, 0x16ad)) && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) usleep_range(1000, 2000); + + /* + * Samsung USBC Headset (AKG) need a tiny delay after each + * class compliant request. (Model number: AAM625R or AAM627R) + */ + if (chip->usb_id == USB_ID(0x04e8, 0xa051) && + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) + usleep_range(5000, 6000); } /* @@ -1856,6 +1865,7 @@ struct registration_quirk { static const struct registration_quirk registration_quirks[] = { REG_QUIRK_ENTRY(0x0951, 0x16d8, 2), /* Kingston HyperX AMP */ REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */ + REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */ { 0 } /* terminator */ }; diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index db189945e9b0..02dabc9e77b0 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -362,6 +362,7 @@ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ #define X86_FEATURE_FSRM (18*32+ 4) /* Fast Short Rep Mov */ #define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */ +#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */ #define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ @@ -407,5 +408,6 @@ #define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */ #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ +#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index ef452b817f44..e8370e64a155 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -128,6 +128,10 @@ #define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */ #define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */ +/* SRBDS support */ +#define MSR_IA32_MCU_OPT_CTRL 0x00000123 +#define RNGDS_MITG_DIS BIT(0) + #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 #define MSR_IA32_SYSENTER_EIP 0x00000176 diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index 43e24903812c..17c5a038f42d 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h @@ -385,33 +385,48 @@ struct kvm_sync_regs { #define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4) #define KVM_STATE_NESTED_FORMAT_VMX 0 -#define KVM_STATE_NESTED_FORMAT_SVM 1 /* unused */ +#define KVM_STATE_NESTED_FORMAT_SVM 1 #define KVM_STATE_NESTED_GUEST_MODE 0x00000001 #define 
KVM_STATE_NESTED_RUN_PENDING 0x00000002 #define KVM_STATE_NESTED_EVMCS 0x00000004 #define KVM_STATE_NESTED_MTF_PENDING 0x00000008 +#define KVM_STATE_NESTED_GIF_SET 0x00000100 #define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_SMM_VMXON 0x00000002 #define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000 +#define KVM_STATE_NESTED_SVM_VMCB_SIZE 0x1000 + +#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001 + struct kvm_vmx_nested_state_data { __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; - __u64 preemption_timer_deadline; }; struct kvm_vmx_nested_state_hdr { + __u32 flags; __u64 vmxon_pa; __u64 vmcs12_pa; + __u64 preemption_timer_deadline; struct { __u16 flags; } smm; }; +struct kvm_svm_nested_state_data { + /* Save area only used if KVM_STATE_NESTED_RUN_PENDING. */ + __u8 vmcb12[KVM_STATE_NESTED_SVM_VMCB_SIZE]; +}; + +struct kvm_svm_nested_state_hdr { + __u64 vmcb_pa; +}; + /* for KVM_CAP_NESTED_STATE */ struct kvm_nested_state { __u16 flags; @@ -420,6 +435,7 @@ struct kvm_nested_state { union { struct kvm_vmx_nested_state_hdr vmx; + struct kvm_svm_nested_state_hdr svm; /* Pad the header to 128 bytes. */ __u8 pad[120]; @@ -432,6 +448,7 @@ struct kvm_nested_state { */ union { struct kvm_vmx_nested_state_data vmx[0]; + struct kvm_svm_nested_state_data svm[0]; } data; }; diff --git a/tools/arch/x86/include/uapi/asm/unistd.h b/tools/arch/x86/include/uapi/asm/unistd.h index 30d7d04d72d6..be5e2e747f50 100644 --- a/tools/arch/x86/include/uapi/asm/unistd.h +++ b/tools/arch/x86/include/uapi/asm/unistd.h @@ -2,7 +2,14 @@ #ifndef _UAPI_ASM_X86_UNISTD_H #define _UAPI_ASM_X86_UNISTD_H -/* x32 syscall flag bit */ +/* + * x32 syscall flag bit. Some user programs expect syscall NR macros + * and __X32_SYSCALL_BIT to have type int, even though syscall numbers + * are, for practical purposes, unsigned long. + * + * Fortunately, expressions like (nr & ~__X32_SYSCALL_BIT) do the right + * thing regardless. + */ #define __X32_SYSCALL_BIT 0x40000000 #ifndef __KERNEL__ diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h index e95b72ec19bc..b8ff9e8ac0d5 100644 --- a/tools/arch/x86/include/uapi/asm/vmx.h +++ b/tools/arch/x86/include/uapi/asm/vmx.h @@ -150,6 +150,9 @@ { EXIT_REASON_UMWAIT, "UMWAIT" }, \ { EXIT_REASON_TPAUSE, "TPAUSE" } +#define VMX_EXIT_REASON_FLAGS \ + { VMX_EXIT_REASONS_FAILED_VMENTRY, "FAILED_VMENTRY" } + #define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2 #define VMX_ABORT_LOAD_HOST_MSR_FAIL 4 diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c index 0efaf45f7367..e0878f5f74b1 100644 --- a/tools/bootconfig/main.c +++ b/tools/bootconfig/main.c @@ -14,13 +14,18 @@ #include #include -static int xbc_show_array(struct xbc_node *node) +static int xbc_show_value(struct xbc_node *node) { const char *val; + char q; int i = 0; xbc_array_for_each_value(node, val) { - printf("\"%s\"%s", val, node->next ? ", " : ";\n"); + if (strchr(val, '"')) + q = '\''; + else + q = '"'; + printf("%c%s%c%s", q, val, q, node->next ? 
", " : ";\n"); i++; } return i; @@ -48,10 +53,7 @@ static void xbc_show_compact_tree(void) continue; } else if (cnode && xbc_node_is_value(cnode)) { printf("%s = ", xbc_node_get_data(node)); - if (cnode->next) - xbc_show_array(cnode); - else - printf("\"%s\";\n", xbc_node_get_data(cnode)); + xbc_show_value(cnode); } else { printf("%s;\n", xbc_node_get_data(node)); } @@ -205,11 +207,13 @@ int show_xbc(const char *path) } ret = load_xbc_from_initrd(fd, &buf); - if (ret < 0) + if (ret < 0) { pr_err("Failed to load a boot config from initrd: %d\n", ret); - else - xbc_show_compact_tree(); - + goto out; + } + xbc_show_compact_tree(); + ret = 0; +out: close(fd); free(buf); diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh index eff16b77d5eb..3c2ab9e75730 100755 --- a/tools/bootconfig/test-bootconfig.sh +++ b/tools/bootconfig/test-bootconfig.sh @@ -55,6 +55,9 @@ echo "Apply command test" xpass $BOOTCONF -a $TEMPCONF $INITRD new_size=$(stat -c %s $INITRD) +echo "Show command test" +xpass $BOOTCONF $INITRD + echo "File size check" xpass test $new_size -eq $(expr $bconf_size + $initrd_size + 9 + 12) @@ -114,6 +117,13 @@ xpass grep -q "bar" $OUTFILE xpass grep -q "baz" $OUTFILE xpass grep -q "qux" $OUTFILE +echo "Double/single quotes test" +echo "key = '\"string\"';" > $TEMPCONF +$BOOTCONF -a $TEMPCONF $INITRD +$BOOTCONF $INITRD > $TEMPCONF +cat $TEMPCONF +xpass grep \'\"string\"\' $TEMPCONF + echo "=== expected failure cases ===" for i in samples/bad-* ; do xfail $BOOTCONF -a $i $INITRD diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index 31101643e57c..70c78faa47ab 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -49,7 +49,7 @@ MAP COMMANDS | | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps** | | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash** | | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage** -| | **queue** | **stack** | **sk_storage** | **struct_ops** } +| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** } DESCRIPTION =========== diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index c5fac8068ba1..1d3b60651078 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -49,6 +49,7 @@ const char * const map_type_name[] = { [BPF_MAP_TYPE_STACK] = "stack", [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage", [BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops", + [BPF_MAP_TYPE_RINGBUF] = "ringbuf", }; const size_t map_type_name_size = ARRAY_SIZE(map_type_name); @@ -1590,7 +1591,7 @@ static int do_help(int argc, char **argv) " lru_percpu_hash | lpm_trie | array_of_maps | hash_of_maps |\n" " devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n" " cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n" - " queue | stack | sk_storage | struct_ops }\n" + " queue | stack | sk_storage | struct_ops | ringbuf }\n" " " HELP_SPEC_OPTIONS "\n" "", bin_name, argv[-2]); diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h index 3a3201e4618e..f4a01305d9a6 100644 --- a/tools/include/uapi/asm-generic/unistd.h +++ b/tools/include/uapi/asm-generic/unistd.h @@ -855,9 +855,11 @@ __SYSCALL(__NR_clone3, sys_clone3) __SYSCALL(__NR_openat2, sys_openat2) #define __NR_pidfd_getfd 438 __SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd) +#define __NR_faccessat2 439 +__SYSCALL(__NR_faccessat2, 
sys_faccessat2) #undef __NR_syscalls -#define __NR_syscalls 439 +#define __NR_syscalls 440 /* * 32 bit systems traditionally used different diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h index 2813e579b480..14b67cd6b54b 100644 --- a/tools/include/uapi/drm/i915_drm.h +++ b/tools/include/uapi/drm/i915_drm.h @@ -1969,6 +1969,30 @@ enum drm_i915_perf_property_id { */ DRM_I915_PERF_PROP_HOLD_PREEMPTION, + /** + * Specifying this pins all contexts to the specified SSEU power + * configuration for the duration of the recording. + * + * This parameter's value is a pointer to a struct + * drm_i915_gem_context_param_sseu. + * + * This property is available in perf revision 4. + */ + DRM_I915_PERF_PROP_GLOBAL_SSEU, + + /** + * This optional parameter specifies the timer interval in nanoseconds + * at which the i915 driver will check the OA buffer for available data. + * Minimum allowed value is 100 microseconds. A default value is used by + * the driver if this parameter is not specified. Note that larger timer + * values will reduce cpu consumption during OA perf captures. However, + * excessively large values would potentially result in OA buffer + * overwrites as captures reach end of the OA buffer. + * + * This property is available in perf revision 5. + */ + DRM_I915_PERF_PROP_POLL_OA_PERIOD, + DRM_I915_PERF_PROP_MAX /* non-ABI */ }; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 19684813faae..974a71342aea 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3168,7 +3168,7 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * - * void *bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) + * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) * Description * Copy *size* bytes from *data* into a ring buffer *ringbuf*. * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h index ca88b7bce553..2f86b2ad6d7e 100644 --- a/tools/include/uapi/linux/fcntl.h +++ b/tools/include/uapi/linux/fcntl.h @@ -84,10 +84,20 @@ #define DN_ATTRIB 0x00000020 /* File changed attibutes */ #define DN_MULTISHOT 0x80000000 /* Don't remove notifier */ +/* + * The constants AT_REMOVEDIR and AT_EACCESS have the same value. AT_EACCESS is + * meaningful only to faccessat, while AT_REMOVEDIR is meaningful only to + * unlinkat. The two functions do completely different things and therefore, + * the flags can be allowed to overlap. For example, passing AT_REMOVEDIR to + * faccessat would be undefined behavior and thus treating it equivalent to + * AT_EACCESS is valid undefined behavior. + */ #define AT_FDCWD -100 /* Special value used to indicate openat should use the current working directory. */ #define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */ +#define AT_EACCESS 0x200 /* Test access permitted for + effective IDs, not real IDs. */ #define AT_REMOVEDIR 0x200 /* Remove directory instead of unlinking file. */ #define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. 
*/ diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h index 379a612f8f1d..f44eb0a04afd 100644 --- a/tools/include/uapi/linux/fs.h +++ b/tools/include/uapi/linux/fs.h @@ -262,6 +262,7 @@ struct fsxattr { #define FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */ #define FS_EOFBLOCKS_FL 0x00400000 /* Reserved for ext4 */ #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ +#define FS_DAX_FL 0x02000000 /* Inode is DAX */ #define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */ #define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ #define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */ diff --git a/tools/include/uapi/linux/fscrypt.h b/tools/include/uapi/linux/fscrypt.h index a10e3cdc2839..7875709ccfeb 100644 --- a/tools/include/uapi/linux/fscrypt.h +++ b/tools/include/uapi/linux/fscrypt.h @@ -19,7 +19,8 @@ #define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03 #define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04 #define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 0x08 -#define FSCRYPT_POLICY_FLAGS_VALID 0x0F +#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32 0x10 +#define FSCRYPT_POLICY_FLAGS_VALID 0x1F /* Encryption algorithms */ #define FSCRYPT_MODE_AES_256_XTS 1 diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index fdd632c833b4..4fdf30316582 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -188,10 +188,13 @@ struct kvm_s390_cmma_log { struct kvm_hyperv_exit { #define KVM_EXIT_HYPERV_SYNIC 1 #define KVM_EXIT_HYPERV_HCALL 2 +#define KVM_EXIT_HYPERV_SYNDBG 3 __u32 type; + __u32 pad1; union { struct { __u32 msr; + __u32 pad2; __u64 control; __u64 evt_page; __u64 msg_page; @@ -201,6 +204,15 @@ struct kvm_hyperv_exit { __u64 result; __u64 params[2]; } hcall; + struct { + __u32 msr; + __u32 pad2; + __u64 control; + __u64 status; + __u64 send_page; + __u64 recv_page; + __u64 pending_page; + } syndbg; } u; }; @@ -1017,6 +1029,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_S390_VCPU_RESETS 179 #define KVM_CAP_S390_PROTECTED 180 #define KVM_CAP_PPC_SECURE_GUEST 181 +#define KVM_CAP_HALT_POLL 182 +#define KVM_CAP_ASYNC_PF_INT 183 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h index d1192783139a..82cc58fe9368 100644 --- a/tools/include/uapi/linux/stat.h +++ b/tools/include/uapi/linux/stat.h @@ -123,7 +123,10 @@ struct statx { __u32 stx_dev_major; /* ID of device containing file [uncond] */ __u32 stx_dev_minor; /* 0x90 */ - __u64 __spare2[14]; /* Spare space for future expansion */ + __u64 stx_mnt_id; + __u64 __spare2; + /* 0xa0 */ + __u64 __spare3[12]; /* Spare space for future expansion */ /* 0x100 */ }; @@ -148,6 +151,7 @@ struct statx { #define STATX_BLOCKS 0x00000400U /* Want/got stx_blocks */ #define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */ #define STATX_BTIME 0x00000800U /* Want/got stx_btime */ +#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */ #define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ @@ -177,7 +181,9 @@ struct statx { #define STATX_ATTR_NODUMP 0x00000040 /* [I] File is not to be dumped */ #define STATX_ATTR_ENCRYPTED 0x00000800 /* [I] File requires key to decrypt in fs */ #define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */ +#define STATX_ATTR_MOUNT_ROOT 0x00002000 /* Root of a mount */ #define STATX_ATTR_VERITY 0x00100000 /* [I] Verity protected file */ +#define STATX_ATTR_DAX 0x00002000 /* [I] File is DAX */ #endif /* _UAPI_LINUX_STAT_H */ diff --git 
a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h index 9fe72e4b1373..0c2349612e77 100644 --- a/tools/include/uapi/linux/vhost.h +++ b/tools/include/uapi/linux/vhost.h @@ -15,6 +15,8 @@ #include #include +#define VHOST_FILE_UNBIND -1 + /* ioctls */ #define VHOST_VIRTIO 0xAF @@ -140,4 +142,6 @@ /* Get the max ring size. */ #define VHOST_VDPA_GET_VRING_NUM _IOR(VHOST_VIRTIO, 0x76, __u16) +/* Set event fd for config interrupt*/ +#define VHOST_VDPA_SET_CONFIG_CALL _IOW(VHOST_VIRTIO, 0x77, int) #endif diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index e1bd2a93c6db..5b36c589a029 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -1425,13 +1425,28 @@ static unsigned int type_size(const char *name) return 0; } +static int append(char **buf, const char *delim, const char *str) +{ + char *new_buf; + + new_buf = realloc(*buf, strlen(*buf) + strlen(delim) + strlen(str) + 1); + if (!new_buf) + return -1; + strcat(new_buf, delim); + strcat(new_buf, str); + *buf = new_buf; + return 0; +} + static int event_read_fields(struct tep_event *event, struct tep_format_field **fields) { struct tep_format_field *field = NULL; enum tep_event_type type; char *token; char *last_token; + char *delim = " "; int count = 0; + int ret; do { unsigned int size_dynamic = 0; @@ -1490,24 +1505,51 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** field->flags |= TEP_FIELD_IS_POINTER; if (field->type) { - char *new_type; - new_type = realloc(field->type, - strlen(field->type) + - strlen(last_token) + 2); - if (!new_type) { - free(last_token); - goto fail; - } - field->type = new_type; - strcat(field->type, " "); - strcat(field->type, last_token); + ret = append(&field->type, delim, last_token); free(last_token); + if (ret < 0) + goto fail; } else field->type = last_token; last_token = token; + delim = " "; continue; } + /* Handle __attribute__((user)) */ + if ((type == TEP_EVENT_DELIM) && + strcmp("__attribute__", last_token) == 0 && + token[0] == '(') { + int depth = 1; + int ret; + + ret = append(&field->type, " ", last_token); + ret |= append(&field->type, "", "("); + if (ret < 0) + goto fail; + + delim = " "; + while ((type = read_token(&token)) != TEP_EVENT_NONE) { + if (type == TEP_EVENT_DELIM) { + if (token[0] == '(') + depth++; + else if (token[0] == ')') + depth--; + if (!depth) + break; + ret = append(&field->type, "", token); + delim = ""; + } else { + ret = append(&field->type, delim, token); + delim = " "; + } + if (ret < 0) + goto fail; + free(last_token); + last_token = token; + } + continue; + } break; } @@ -1523,8 +1565,6 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** if (strcmp(token, "[") == 0) { enum tep_event_type last_type = type; char *brackets = token; - char *new_brackets; - int len; field->flags |= TEP_FIELD_IS_ARRAY; @@ -1536,29 +1576,27 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** field->arraylen = 0; while (strcmp(token, "]") != 0) { + const char *delim; + if (last_type == TEP_EVENT_ITEM && type == TEP_EVENT_ITEM) - len = 2; + delim = " "; else - len = 1; + delim = ""; + last_type = type; - new_brackets = realloc(brackets, - strlen(brackets) + - strlen(token) + len); - if (!new_brackets) { + ret = append(&brackets, delim, token); + if (ret < 0) { free(brackets); goto fail; } - brackets = new_brackets; - if (len == 2) - strcat(brackets, " "); - strcat(brackets, token); /* We only care about the 
last token */ field->arraylen = strtoul(token, NULL, 0); free_token(token); type = read_token(&token); if (type == TEP_EVENT_NONE) { + free(brackets); do_warning_event(event, "failed to find token"); goto fail; } @@ -1566,13 +1604,11 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** free_token(token); - new_brackets = realloc(brackets, strlen(brackets) + 2); - if (!new_brackets) { + ret = append(&brackets, "", "]"); + if (ret < 0) { free(brackets); goto fail; } - brackets = new_brackets; - strcat(brackets, "]"); /* add brackets to type */ @@ -1582,34 +1618,23 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** * the format: type [] item; */ if (type == TEP_EVENT_ITEM) { - char *new_type; - new_type = realloc(field->type, - strlen(field->type) + - strlen(field->name) + - strlen(brackets) + 2); - if (!new_type) { + ret = append(&field->type, " ", field->name); + if (ret < 0) { free(brackets); goto fail; } - field->type = new_type; - strcat(field->type, " "); - strcat(field->type, field->name); + ret = append(&field->type, "", brackets); + size_dynamic = type_size(field->name); free_token(field->name); - strcat(field->type, brackets); field->name = field->alias = token; type = read_token(&token); } else { - char *new_type; - new_type = realloc(field->type, - strlen(field->type) + - strlen(brackets) + 1); - if (!new_type) { + ret = append(&field->type, "", brackets); + if (ret < 0) { free(brackets); goto fail; } - field->type = new_type; - strcat(field->type, brackets); } free(brackets); } @@ -2046,19 +2071,16 @@ process_op(struct tep_event *event, struct tep_print_arg *arg, char **tok) /* could just be a type pointer */ if ((strcmp(arg->op.op, "*") == 0) && type == TEP_EVENT_DELIM && (strcmp(token, ")") == 0)) { - char *new_atom; + int ret; if (left->type != TEP_PRINT_ATOM) { do_warning_event(event, "bad pointer type"); goto out_free; } - new_atom = realloc(left->atom.atom, - strlen(left->atom.atom) + 3); - if (!new_atom) + ret = append(&left->atom.atom, " ", "*"); + if (ret < 0) goto out_warn_free; - left->atom.atom = new_atom; - strcat(left->atom.atom, " *"); free(arg->op.op); *arg = *left; free(left); @@ -3062,6 +3084,37 @@ process_func_handler(struct tep_event *event, struct tep_function_handler *func, return TEP_EVENT_ERROR; } +static enum tep_event_type +process_builtin_expect(struct tep_event *event, struct tep_print_arg *arg, char **tok) +{ + enum tep_event_type type; + char *token = NULL; + + /* Handle __builtin_expect( cond, #) */ + type = process_arg(event, arg, &token); + + if (type != TEP_EVENT_DELIM || token[0] != ',') + goto out_free; + + free_token(token); + + /* We don't care what the second parameter is of the __builtin_expect() */ + if (read_expect_type(TEP_EVENT_ITEM, &token) < 0) + goto out_free; + + if (read_expected(TEP_EVENT_DELIM, ")") < 0) + goto out_free; + + free_token(token); + type = read_token_item(tok); + return type; + +out_free: + free_token(token); + *tok = NULL; + return TEP_EVENT_ERROR; +} + static enum tep_event_type process_function(struct tep_event *event, struct tep_print_arg *arg, char *token, char **tok) @@ -3106,6 +3159,10 @@ process_function(struct tep_event *event, struct tep_print_arg *arg, free_token(token); return process_dynamic_array_len(event, arg, tok); } + if (strcmp(token, "__builtin_expect") == 0) { + free_token(token); + return process_builtin_expect(event, arg, tok); + } func = find_func_handler(event->tep, token); if (func) { @@ -3151,18 +3208,15 @@ 
process_arg_token(struct tep_event *event, struct tep_print_arg *arg, } /* atoms can be more than one token long */ while (type == TEP_EVENT_ITEM) { - char *new_atom; - new_atom = realloc(atom, - strlen(atom) + strlen(token) + 2); - if (!new_atom) { + int ret; + + ret = append(&atom, " ", token); + if (ret < 0) { free(atom); *tok = NULL; free_token(token); return TEP_EVENT_ERROR; } - atom = new_atom; - strcat(atom, " "); - strcat(atom, token); free_token(token); type = read_token_item(&token); } diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h index eda15a5a285e..3c5967748abb 100644 --- a/tools/objtool/arch.h +++ b/tools/objtool/arch.h @@ -84,4 +84,6 @@ unsigned long arch_jump_destination(struct instruction *insn); unsigned long arch_dest_rela_offset(int addend); +const char *arch_nop_insn(int len); + #endif /* _ARCH_H */ diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 4b504fc90bbb..9872195f998b 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -565,3 +565,21 @@ void arch_initial_func_cfi_state(struct cfi_init_state *state) state->regs[16].base = CFI_CFA; state->regs[16].offset = -8; } + +const char *arch_nop_insn(int len) +{ + static const char nops[5][5] = { + /* 1 */ { 0x90 }, + /* 2 */ { 0x66, 0x90 }, + /* 3 */ { 0x0f, 0x1f, 0x00 }, + /* 4 */ { 0x0f, 0x1f, 0x40, 0x00 }, + /* 5 */ { 0x0f, 0x1f, 0x44, 0x00, 0x00 }, + }; + + if (len < 1 || len > 5) { + WARN("invalid NOP size: %d\n", len); + return NULL; + } + + return nops[len-1]; +} diff --git a/tools/objtool/arch/x86/include/arch_elf.h b/tools/objtool/arch/x86/include/arch_elf.h new file mode 100644 index 000000000000..69cc4264b28a --- /dev/null +++ b/tools/objtool/arch/x86/include/arch_elf.h @@ -0,0 +1,6 @@ +#ifndef _OBJTOOL_ARCH_ELF +#define _OBJTOOL_ARCH_ELF + +#define R_NONE R_X86_64_NONE + +#endif /* _OBJTOOL_ARCH_ELF */ diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 5fbb90a80d23..5e0d70a89fb8 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -12,6 +12,7 @@ #include "check.h" #include "special.h" #include "warn.h" +#include "arch_elf.h" #include #include @@ -765,6 +766,24 @@ static int add_call_destinations(struct objtool_file *file) } else insn->call_dest = rela->sym; + /* + * Many compilers cannot disable KCOV with a function attribute + * so they need a little help, NOP out any KCOV calls from noinstr + * text. + */ + if (insn->sec->noinstr && + !strncmp(insn->call_dest->name, "__sanitizer_cov_", 16)) { + if (rela) { + rela->type = R_NONE; + elf_write_rela(file->elf, rela); + } + + elf_write_insn(file->elf, insn->sec, + insn->offset, insn->len, + arch_nop_insn(insn->len)); + insn->type = INSN_NOP; + } + /* * Whatever stack impact regular CALLs have, should be undone * by the RETURN of the called function. @@ -2190,10 +2209,36 @@ static inline const char *call_dest_name(struct instruction *insn) return "{dynamic}"; } +static inline bool noinstr_call_dest(struct symbol *func) +{ + /* + * We can't deal with indirect function calls at present; + * assume they're instrumented. + */ + if (!func) + return false; + + /* + * If the symbol is from a noinstr section; we good. + */ + if (func->sec->noinstr) + return true; + + /* + * The __ubsan_handle_*() calls are like WARN(), they only happen when + * something 'BAD' happened. At the risk of taking the machine down, + * let them proceed to get the message out. 
+ */ + if (!strncmp(func->name, "__ubsan_handle_", 15)) + return true; + + return false; +} + static int validate_call(struct instruction *insn, struct insn_state *state) { if (state->noinstr && state->instr <= 0 && - (!insn->call_dest || !insn->call_dest->sec->noinstr)) { + !noinstr_call_dest(insn->call_dest)) { WARN_FUNC("call to %s() leaves .noinstr.text section", insn->sec, insn->offset, call_dest_name(insn)); return 1; @@ -2740,13 +2785,13 @@ int check(const char *_objname, bool orc) objname = _objname; - file.elf = elf_open_read(objname, orc ? O_RDWR : O_RDONLY); + file.elf = elf_open_read(objname, O_RDWR); if (!file.elf) return 1; INIT_LIST_HEAD(&file.insn_list); hash_init(file.insn_hash); - file.c_file = find_section_by_name(file.elf, ".comment"); + file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment"); file.ignore_unreachables = no_unreachable; file.hints = false; @@ -2801,7 +2846,9 @@ int check(const char *_objname, bool orc) ret = create_orc_sections(&file); if (ret < 0) goto out; + } + if (file.elf->changed) { ret = elf_write(file.elf); if (ret < 0) goto out; diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 84225679f96d..26d11d821941 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -529,8 +529,9 @@ static int read_relas(struct elf *elf) rela->addend = rela->rela.r_addend; rela->offset = rela->rela.r_offset; symndx = GELF_R_SYM(rela->rela.r_info); - rela->sym = find_symbol_by_index(elf, symndx); rela->sec = sec; + rela->idx = i; + rela->sym = find_symbol_by_index(elf, symndx); if (!rela->sym) { WARN("can't find rela entry symbol %d for %s", symndx, sec->name); @@ -713,6 +714,8 @@ struct section *elf_create_section(struct elf *elf, const char *name, elf_hash_add(elf->section_hash, &sec->hash, sec->idx); elf_hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name)); + elf->changed = true; + return sec; } @@ -746,7 +749,7 @@ struct section *elf_create_rela_section(struct elf *elf, struct section *base) return sec; } -int elf_rebuild_rela_section(struct section *sec) +int elf_rebuild_rela_section(struct elf *elf, struct section *sec) { struct rela *rela; int nr, idx = 0, size; @@ -763,6 +766,9 @@ int elf_rebuild_rela_section(struct section *sec) return -1; } + sec->changed = true; + elf->changed = true; + sec->data->d_buf = relas; sec->data->d_size = size; @@ -779,7 +785,44 @@ int elf_rebuild_rela_section(struct section *sec) return 0; } -int elf_write(const struct elf *elf) +int elf_write_insn(struct elf *elf, struct section *sec, + unsigned long offset, unsigned int len, + const char *insn) +{ + Elf_Data *data = sec->data; + + if (data->d_type != ELF_T_BYTE || data->d_off) { + WARN("write to unexpected data for section: %s", sec->name); + return -1; + } + + memcpy(data->d_buf + offset, insn, len); + elf_flagdata(data, ELF_C_SET, ELF_F_DIRTY); + + elf->changed = true; + + return 0; +} + +int elf_write_rela(struct elf *elf, struct rela *rela) +{ + struct section *sec = rela->sec; + + rela->rela.r_info = GELF_R_INFO(rela->sym->idx, rela->type); + rela->rela.r_addend = rela->addend; + rela->rela.r_offset = rela->offset; + + if (!gelf_update_rela(sec->data, rela->idx, &rela->rela)) { + WARN_ELF("gelf_update_rela"); + return -1; + } + + elf->changed = true; + + return 0; +} + +int elf_write(struct elf *elf) { struct section *sec; Elf_Scn *s; @@ -796,6 +839,8 @@ int elf_write(const struct elf *elf) WARN_ELF("gelf_update_shdr"); return -1; } + + sec->changed = false; } } @@ -808,6 +853,8 @@ int elf_write(const struct elf *elf) return 
-1; } + elf->changed = false; + return 0; } diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index f4fe1d6ea392..7324e772583e 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h @@ -64,9 +64,10 @@ struct rela { GElf_Rela rela; struct section *sec; struct symbol *sym; - unsigned int type; unsigned long offset; + unsigned int type; int addend; + int idx; bool jump_table_start; }; @@ -76,6 +77,7 @@ struct elf { Elf *elf; GElf_Ehdr ehdr; int fd; + bool changed; char *name; struct list_head sections; DECLARE_HASHTABLE(symbol_hash, ELF_HASH_BITS); @@ -118,7 +120,11 @@ struct elf *elf_open_read(const char *name, int flags); struct section *elf_create_section(struct elf *elf, const char *name, size_t entsize, int nr); struct section *elf_create_rela_section(struct elf *elf, struct section *base); void elf_add_rela(struct elf *elf, struct rela *rela); -int elf_write(const struct elf *elf); +int elf_write_insn(struct elf *elf, struct section *sec, + unsigned long offset, unsigned int len, + const char *insn); +int elf_write_rela(struct elf *elf, struct rela *rela); +int elf_write(struct elf *elf); void elf_close(struct elf *elf); struct section *find_section_by_name(const struct elf *elf, const char *name); @@ -130,7 +136,7 @@ struct rela *find_rela_by_dest(const struct elf *elf, struct section *sec, unsig struct rela *find_rela_by_dest_range(const struct elf *elf, struct section *sec, unsigned long offset, unsigned int len); struct symbol *find_func_containing(struct section *sec, unsigned long offset); -int elf_rebuild_rela_section(struct section *sec); +int elf_rebuild_rela_section(struct elf *elf, struct section *sec); #define for_each_sec(file, sec) \ list_for_each_entry(sec, &file->elf->sections, list) diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index c9549988121a..4c37f80eb987 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -222,7 +222,7 @@ int create_orc_sections(struct objtool_file *file) } } - if (elf_rebuild_rela_section(ip_relasec)) + if (elf_rebuild_rela_section(file->elf, ip_relasec)) return -1; return 0; diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 877ca6be0ed0..513633809c81 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -396,6 +396,18 @@ else NO_LIBBPF := 1 NO_JVMTI := 1 else + ifneq ($(filter s% -fsanitize=address%,$(EXTRA_CFLAGS),),) + ifneq ($(shell ldconfig -p | grep libasan >/dev/null 2>&1; echo $$?), 0) + msg := $(error No libasan found, please install libasan); + endif + endif + + ifneq ($(filter s% -fsanitize=undefined%,$(EXTRA_CFLAGS),),) + ifneq ($(shell ldconfig -p | grep libubsan >/dev/null 2>&1; echo $$?), 0) + msg := $(error No libubsan found, please install libubsan); + endif + endif + ifneq ($(filter s% -static%,$(LDFLAGS),),) msg := $(error No static glibc found, please install glibc-static); else diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index 37b844f839bc..78847b32e137 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -359,6 +359,7 @@ 435 common clone3 sys_clone3 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd +439 common faccessat2 sys_faccessat2 # # x32-specific system call numbers start at 512 to avoid cache impact diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index b63b3fb2de70..5f1d2a878fad 100644 --- a/tools/perf/builtin-report.c +++ 
b/tools/perf/builtin-report.c @@ -478,8 +478,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report if (rep->time_str) ret += fprintf(fp, " (time slices: %s)", rep->time_str); - if (symbol_conf.show_ref_callgraph && - strstr(evname, "call-graph=no")) { + if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) { ret += fprintf(fp, ", show reference callgraph"); } diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 5da243676f12..181d65e5a450 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -3837,6 +3837,9 @@ int cmd_script(int argc, const char **argv) if (err) goto out_delete; + if (zstd_init(&(session->zstd_data), 0) < 0) + pr_warning("Decompression initialization failed. Reported data may be incomplete.\n"); + err = __cmd_script(&script); flush_scripting(); diff --git a/tools/perf/trace/beauty/statx.c b/tools/perf/trace/beauty/statx.c index 811cc0eeb2d5..110f0c609d84 100644 --- a/tools/perf/trace/beauty/statx.c +++ b/tools/perf/trace/beauty/statx.c @@ -65,6 +65,7 @@ size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_a P_FLAG(SIZE); P_FLAG(BLOCKS); P_FLAG(BTIME); + P_FLAG(MNT_ID); #undef P_FLAG diff --git a/tools/perf/util/bpf-prologue.c b/tools/perf/util/bpf-prologue.c index b020a8678eb9..9887ae09242d 100644 --- a/tools/perf/util/bpf-prologue.c +++ b/tools/perf/util/bpf-prologue.c @@ -142,7 +142,8 @@ static int gen_read_mem(struct bpf_insn_pos *pos, int src_base_addr_reg, int dst_addr_reg, - long offset) + long offset, + int probeid) { /* mov arg3, src_base_addr_reg */ if (src_base_addr_reg != BPF_REG_ARG3) @@ -159,7 +160,7 @@ gen_read_mem(struct bpf_insn_pos *pos, ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos); /* Call probe_read */ - ins(BPF_EMIT_CALL(BPF_FUNC_probe_read), pos); + ins(BPF_EMIT_CALL(probeid), pos); /* * Error processing: if read fail, goto error code, * will be relocated. 
Target should be the start of @@ -241,7 +242,7 @@ static int gen_prologue_slowpath(struct bpf_insn_pos *pos, struct probe_trace_arg *args, int nargs) { - int err, i; + int err, i, probeid; for (i = 0; i < nargs; i++) { struct probe_trace_arg *arg = &args[i]; @@ -276,11 +277,16 @@ gen_prologue_slowpath(struct bpf_insn_pos *pos, stack_offset), pos); ref = arg->ref; + probeid = BPF_FUNC_probe_read_kernel; while (ref) { pr_debug("prologue: arg %d: offset %ld\n", i, ref->offset); + + if (ref->user_access) + probeid = BPF_FUNC_probe_read_user; + err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7, - ref->offset); + ref->offset, probeid); if (err) { pr_err("prologue: failed to generate probe_read function call\n"); goto errout; diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index c4ca932d092d..acef87d9af58 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -26,7 +26,7 @@ do { \ YYABORT; \ } while (0) -static struct list_head* alloc_list() +static struct list_head* alloc_list(void) { struct list_head *list; @@ -349,7 +349,7 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc struct list_head *list; char pmu_name[128]; - snprintf(&pmu_name, 128, "%s-%s", $1, $3); + snprintf(pmu_name, sizeof(pmu_name), "%s-%s", $1, $3); free($1); free($3); if (parse_events_multi_pmu_add(_parse_state, pmu_name, &list) < 0) diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 85e0c7f2515c..f971d9aa4570 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -86,7 +86,6 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms, struct perf_pmu_info *info); struct list_head *perf_pmu__alias(struct perf_pmu *pmu, struct list_head *head_terms); -int perf_pmu_wrap(void); void perf_pmu_error(struct list_head *list, char *name, char const *msg); int perf_pmu__new_format(struct list_head *list, char *name, diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index a08f373d3305..df713a5d1e26 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -1575,7 +1575,7 @@ static int parse_perf_probe_arg(char *str, struct perf_probe_arg *arg) } tmp = strchr(str, '@'); - if (tmp && tmp != str && strcmp(tmp + 1, "user")) { /* user attr */ + if (tmp && tmp != str && !strcmp(tmp + 1, "user")) { /* user attr */ if (!user_access_is_supported()) { semantic_error("ftrace does not support user access\n"); return -EINVAL; @@ -1995,7 +1995,10 @@ static int __synthesize_probe_trace_arg_ref(struct probe_trace_arg_ref *ref, if (depth < 0) return depth; } - err = strbuf_addf(buf, "%+ld(", ref->offset); + if (ref->user_access) + err = strbuf_addf(buf, "%s%ld(", "+u", ref->offset); + else + err = strbuf_addf(buf, "%+ld(", ref->offset); return (err < 0) ? 
err : depth; } diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c index 8c852948513e..064b63a6a3f3 100644 --- a/tools/perf/util/probe-file.c +++ b/tools/perf/util/probe-file.c @@ -1044,7 +1044,7 @@ static struct { DEFINE_TYPE(FTRACE_README_PROBE_TYPE_X, "*type: * x8/16/32/64,*"), DEFINE_TYPE(FTRACE_README_KRETPROBE_OFFSET, "*place (kretprobe): *"), DEFINE_TYPE(FTRACE_README_UPROBE_REF_CTR, "*ref_ctr_offset*"), - DEFINE_TYPE(FTRACE_README_USER_ACCESS, "*[u]*"), + DEFINE_TYPE(FTRACE_README_USER_ACCESS, "*u]*"), DEFINE_TYPE(FTRACE_README_MULTIPROBE_EVENT, "*Create/append/*"), DEFINE_TYPE(FTRACE_README_IMMEDIATE_VALUE, "*\\imm-value,*"), }; diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 3c6976f7574c..57d0706e1330 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -668,7 +668,7 @@ static void print_aggr(struct perf_stat_config *config, int s; bool first; - if (!(config->aggr_map || config->aggr_get_id)) + if (!config->aggr_map || !config->aggr_get_id) return; aggr_update_shadow(config, evlist); @@ -1169,7 +1169,7 @@ static void print_percore(struct perf_stat_config *config, int s; bool first = true; - if (!(config->aggr_map || config->aggr_get_id)) + if (!config->aggr_map || !config->aggr_get_id) return; if (config->percore_show_thread) diff --git a/tools/spi/spidev_test.c b/tools/spi/spidev_test.c index eec23fa693bd..83844f8b862a 100644 --- a/tools/spi/spidev_test.c +++ b/tools/spi/spidev_test.c @@ -47,7 +47,7 @@ static int transfer_size; static int iterations; static int interval = 5; /* interval in seconds for showing transfer rate */ -uint8_t default_tx[] = { +static uint8_t default_tx[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x40, 0x00, 0x00, 0x00, 0x00, 0x95, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, @@ -56,8 +56,8 @@ uint8_t default_tx[] = { 0xF0, 0x0D, }; -uint8_t default_rx[ARRAY_SIZE(default_tx)] = {0, }; -char *input_tx; +static uint8_t default_rx[ARRAY_SIZE(default_tx)] = {0, }; +static char *input_tx; static void hex_dump(const void *src, size_t length, size_t line_size, char *prefix) @@ -461,8 +461,8 @@ int main(int argc, char *argv[]) pabort("can't get max speed hz"); printf("spi mode: 0x%x\n", mode); - printf("bits per word: %d\n", bits); - printf("max speed: %d Hz (%d KHz)\n", speed, speed/1000); + printf("bits per word: %u\n", bits); + printf("max speed: %u Hz (%u kHz)\n", speed, speed/1000); if (input_tx) transfer_escaped_string(fd, input_tx); diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h index db3c07beb9d1..b5f7a996c4d0 100644 --- a/tools/testing/nvdimm/test/nfit_test.h +++ b/tools/testing/nvdimm/test/nfit_test.h @@ -51,7 +51,7 @@ struct nd_cmd_translate_spa { __u32 nfit_device_handle; __u32 _reserved; __u64 dpa; - } __packed devices[0]; + } __packed devices[]; } __packed; @@ -74,7 +74,7 @@ struct nd_cmd_ars_err_inj_stat { struct nd_error_stat_query_record { __u64 err_inj_stat_spa_range_base; __u64 err_inj_stat_spa_range_length; - } __packed record[0]; + } __packed record[]; } __packed; #define ND_INTEL_SMART 1 @@ -180,7 +180,7 @@ struct nd_intel_fw_send_data { __u32 context; __u32 offset; __u32 length; - __u8 data[0]; + __u8 data[]; /* this field is not declared due ot variable data from input */ /* __u32 status; */ } __packed; diff --git a/tools/testing/selftests/arm64/signal/Makefile b/tools/testing/selftests/arm64/signal/Makefile index b497cfea4643..ac4ad0005715 100644 --- a/tools/testing/selftests/arm64/signal/Makefile +++ 
b/tools/testing/selftests/arm64/signal/Makefile @@ -21,10 +21,6 @@ include ../../lib.mk $(TEST_GEN_PROGS): $(PROGS) cp $(PROGS) $(OUTPUT)/ -clean: - $(CLEAN) - rm -f $(PROGS) - # Common test-unit targets to build common-layout test-cases executables # Needs secondary expansion to properly include the testcase c-file in pre-reqs .SECONDEXPANSION: diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c index 2061a6beac0f..5f54c6aec7f0 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c @@ -13,6 +13,7 @@ static int getsetsockopt(void) char cc[16]; /* TCP_CA_NAME_MAX */ } buf = {}; socklen_t optlen; + char *big_buf = NULL; fd = socket(AF_INET, SOCK_STREAM, 0); if (fd < 0) { @@ -22,24 +23,31 @@ static int getsetsockopt(void) /* IP_TOS - BPF bypass */ - buf.u8[0] = 0x08; - err = setsockopt(fd, SOL_IP, IP_TOS, &buf, 1); + optlen = getpagesize() * 2; + big_buf = calloc(1, optlen); + if (!big_buf) { + log_err("Couldn't allocate two pages"); + goto err; + } + + *(int *)big_buf = 0x08; + err = setsockopt(fd, SOL_IP, IP_TOS, big_buf, optlen); if (err) { log_err("Failed to call setsockopt(IP_TOS)"); goto err; } - buf.u8[0] = 0x00; + memset(big_buf, 0, optlen); optlen = 1; - err = getsockopt(fd, SOL_IP, IP_TOS, &buf, &optlen); + err = getsockopt(fd, SOL_IP, IP_TOS, big_buf, &optlen); if (err) { log_err("Failed to call getsockopt(IP_TOS)"); goto err; } - if (buf.u8[0] != 0x08) { - log_err("Unexpected getsockopt(IP_TOS) buf[0] 0x%02x != 0x08", - buf.u8[0]); + if (*(int *)big_buf != 0x08) { + log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08", + *(int *)big_buf); goto err; } @@ -78,6 +86,28 @@ static int getsetsockopt(void) goto err; } + /* IP_FREEBIND - BPF can't access optval past PAGE_SIZE */ + + optlen = getpagesize() * 2; + memset(big_buf, 0, optlen); + + err = setsockopt(fd, SOL_IP, IP_FREEBIND, big_buf, optlen); + if (err != 0) { + log_err("Failed to call setsockopt, ret=%d", err); + goto err; + } + + err = getsockopt(fd, SOL_IP, IP_FREEBIND, big_buf, &optlen); + if (err != 0) { + log_err("Failed to call getsockopt, ret=%d", err); + goto err; + } + + if (optlen != 1 || *(__u8 *)big_buf != 0x55) { + log_err("Unexpected IP_FREEBIND getsockopt, optlen=%d, optval=0x%x", + optlen, *(__u8 *)big_buf); + } + /* SO_SNDBUF is overwritten */ buf.u32 = 0x01010101; @@ -124,9 +154,11 @@ static int getsetsockopt(void) goto err; } + free(big_buf); close(fd); return 0; err: + free(big_buf); close(fd); return -1; } diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c index 7897c8f4d363..ef574087f1e1 100644 --- a/tools/testing/selftests/bpf/progs/bpf_cubic.c +++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c @@ -480,10 +480,9 @@ static __always_inline void hystart_update(struct sock *sk, __u32 delay) if (hystart_detect & HYSTART_DELAY) { /* obtain the minimum delay of more than sampling packets */ + if (ca->curr_rtt > delay) + ca->curr_rtt = delay; if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { - if (ca->curr_rtt > delay) - ca->curr_rtt = delay; - ca->sample_cnt++; } else { if (ca->curr_rtt > ca->delay_min + diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c index d5a5eeb5fb52..712df7b49cb1 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_sk.c +++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c @@ -8,6 +8,10 @@ char _license[] SEC("license") = "GPL"; __u32 
_version SEC("version") = 1; +#ifndef PAGE_SIZE +#define PAGE_SIZE 4096 +#endif + #define SOL_CUSTOM 0xdeadbeef struct sockopt_sk { @@ -28,12 +32,14 @@ int _getsockopt(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; struct sockopt_sk *storage; - if (ctx->level == SOL_IP && ctx->optname == IP_TOS) + if (ctx->level == SOL_IP && ctx->optname == IP_TOS) { /* Not interested in SOL_IP:IP_TOS; * let next BPF program in the cgroup chain or kernel * handle it. */ + ctx->optlen = 0; /* bypass optval>PAGE_SIZE */ return 1; + } if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) { /* Not interested in SOL_SOCKET:SO_SNDBUF; @@ -51,6 +57,26 @@ int _getsockopt(struct bpf_sockopt *ctx) return 1; } + if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) { + if (optval + 1 > optval_end) + return 0; /* EPERM, bounds check */ + + ctx->retval = 0; /* Reset system call return value to zero */ + + /* Always export 0x55 */ + optval[0] = 0x55; + ctx->optlen = 1; + + /* Userspace buffer is PAGE_SIZE * 2, but BPF + * program can only see the first PAGE_SIZE + * bytes of data. + */ + if (optval_end - optval != PAGE_SIZE) + return 0; /* EPERM, unexpected data size */ + + return 1; + } + if (ctx->level != SOL_CUSTOM) return 0; /* EPERM, deny everything except custom level */ @@ -81,12 +107,14 @@ int _setsockopt(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; struct sockopt_sk *storage; - if (ctx->level == SOL_IP && ctx->optname == IP_TOS) + if (ctx->level == SOL_IP && ctx->optname == IP_TOS) { /* Not interested in SOL_IP:IP_TOS; * let next BPF program in the cgroup chain or kernel * handle it. */ + ctx->optlen = 0; /* bypass optval>PAGE_SIZE */ return 1; + } if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) { /* Overwrite SO_SNDBUF value */ @@ -112,6 +140,28 @@ int _setsockopt(struct bpf_sockopt *ctx) return 1; } + if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) { + /* Original optlen is larger than PAGE_SIZE. */ + if (ctx->optlen != PAGE_SIZE * 2) + return 0; /* EPERM, unexpected data size */ + + if (optval + 1 > optval_end) + return 0; /* EPERM, bounds check */ + + /* Make sure we can trim the buffer. */ + optval[0] = 0; + ctx->optlen = 1; + + /* Usepace buffer is PAGE_SIZE * 2, but BPF + * program can only see the first PAGE_SIZE + * bytes of data. + */ + if (optval_end - optval != PAGE_SIZE) + return 0; /* EPERM, unexpected data size */ + + return 1; + } + if (ctx->level != SOL_CUSTOM) return 0; /* EPERM, deny everything except custom level */ diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest index a4605b5ee66d..8ec1922e974e 100755 --- a/tools/testing/selftests/ftrace/ftracetest +++ b/tools/testing/selftests/ftrace/ftracetest @@ -263,10 +263,16 @@ CASENO=0 testcase() { # testfile CASENO=$((CASENO+1)) - desc=`grep "^#[ \t]*description:" $1 | cut -f2 -d:` + desc=`grep "^#[ \t]*description:" $1 | cut -f2- -d:` prlog -n "[$CASENO]$INSTANCE$desc" } +checkreq() { # testfile + requires=`grep "^#[ \t]*requires:" $1 | cut -f2- -d:` + # Use eval to pass quoted-patterns correctly. + eval check_requires "$requires" +} + test_on_instance() { # testfile grep -q "^#[ \t]*flags:.*instance" $1 } @@ -356,7 +362,8 @@ trap 'SIG_RESULT=$XFAIL' $SIG_XFAIL __run_test() { # testfile # setup PID and PPID, $$ is not updated. - (cd $TRACING_DIR; read PID _ < /proc/self/stat; set -e; set -x; initialize_ftrace; . $1) + (cd $TRACING_DIR; read PID _ < /proc/self/stat; set -e; set -x; + checkreq $1; initialize_ftrace; . $1) [ $? 
-ne 0 ] && kill -s $SIG_FAIL $SIG_PID } diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc index 3b1f45e13a2e..13b4dabcf46e 100644 --- a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc +++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc @@ -1,9 +1,8 @@ #!/bin/sh # description: Snapshot and tracing setting +# requires: snapshot # flags: instance -[ ! -f snapshot ] && exit_unsupported - echo "Set tracing off" echo 0 > tracing_on diff --git a/tools/testing/selftests/ftrace/test.d/00basic/trace_pipe.tc b/tools/testing/selftests/ftrace/test.d/00basic/trace_pipe.tc index 5058fbcfd90f..435d07b13407 100644 --- a/tools/testing/selftests/ftrace/test.d/00basic/trace_pipe.tc +++ b/tools/testing/selftests/ftrace/test.d/00basic/trace_pipe.tc @@ -1,10 +1,9 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: trace_pipe and trace_marker +# requires: trace_marker # flags: instance -[ ! -f trace_marker ] && exit_unsupported - echo "test input 1" > trace_marker : "trace interface never consume the ring buffer" diff --git a/tools/testing/selftests/ftrace/test.d/direct/kprobe-direct.tc b/tools/testing/selftests/ftrace/test.d/direct/kprobe-direct.tc index 801ecb63e84c..e52e470a1f8f 100644 --- a/tools/testing/selftests/ftrace/test.d/direct/kprobe-direct.tc +++ b/tools/testing/selftests/ftrace/test.d/direct/kprobe-direct.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Test ftrace direct functions against kprobes +# requires: kprobe_events rmmod ftrace-direct ||: if ! modprobe ftrace-direct ; then @@ -8,11 +9,6 @@ if ! modprobe ftrace-direct ; then exit_unresolved; fi -if [ ! -f kprobe_events ]; then - echo "No kprobe_events file -please build CONFIG_KPROBE_EVENTS" - exit_unresolved; -fi - echo "Let the module run a little" sleep 1 diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc index c6d8387dbbb8..68550f97d3c3 100644 --- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc +++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc @@ -1,11 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Generic dynamic event - add/remove kprobe events - -[ -f dynamic_events ] || exit_unsupported - -grep -q "place: \[:\]" README || exit_unsupported -grep -q "place (kretprobe): \[:\]" README || exit_unsupported +# requires: dynamic_events "place: [:]":README "place (kretprobe): [:]":README echo 0 > events/enable echo > dynamic_events diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_synth.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_synth.tc index 62b77b5941d0..2b94611e1a28 100644 --- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_synth.tc +++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_synth.tc @@ -1,10 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Generic dynamic event - add/remove synthetic events - -[ -f dynamic_events ] || exit_unsupported - -grep -q "s:\[synthetic/\]" README || exit_unsupported +# requires: dynamic_events "s:[synthetic/]":README echo 0 > events/enable echo > dynamic_events diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/clear_select_events.tc b/tools/testing/selftests/ftrace/test.d/dynevent/clear_select_events.tc index e0842109cb57..c969be9eb7de 100644 --- 
a/tools/testing/selftests/ftrace/test.d/dynevent/clear_select_events.tc +++ b/tools/testing/selftests/ftrace/test.d/dynevent/clear_select_events.tc @@ -1,16 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Generic dynamic event - selective clear (compatibility) - -[ -f dynamic_events ] || exit_unsupported - -grep -q "place: \[:\]" README || exit_unsupported -grep -q "place (kretprobe): \[:\]" README || exit_unsupported - -grep -q "s:\[synthetic/\]" README || exit_unsupported - -[ -f synthetic_events ] || exit_unsupported -[ -f kprobe_events ] || exit_unsupported +# requires: dynamic_events kprobe_events synthetic_events "place: [:]":README "place (kretprobe): [:]":README "s:[synthetic/]":README echo 0 > events/enable echo > dynamic_events diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/generic_clear_event.tc b/tools/testing/selftests/ftrace/test.d/dynevent/generic_clear_event.tc index 901922e97878..16d543eaac88 100644 --- a/tools/testing/selftests/ftrace/test.d/dynevent/generic_clear_event.tc +++ b/tools/testing/selftests/ftrace/test.d/dynevent/generic_clear_event.tc @@ -1,13 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Generic dynamic event - generic clear event - -[ -f dynamic_events ] || exit_unsupported - -grep -q "place: \[:\]" README || exit_unsupported -grep -q "place (kretprobe): \[:\]" README || exit_unsupported - -grep -q "s:\[synthetic/\]" README || exit_unsupported +# requires: dynamic_events "place: [:]":README "place (kretprobe): [:]":README "s:[synthetic/]":README echo 0 > events/enable echo > dynamic_events diff --git a/tools/testing/selftests/ftrace/test.d/event/event-enable.tc b/tools/testing/selftests/ftrace/test.d/event/event-enable.tc index dfb0d5122f7b..cfe5bd2d4267 100644 --- a/tools/testing/selftests/ftrace/test.d/event/event-enable.tc +++ b/tools/testing/selftests/ftrace/test.d/event/event-enable.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event tracing - enable/disable with event level files +# requires: set_event events/sched # flags: instance do_reset() { @@ -13,11 +14,6 @@ fail() { #msg exit_fail } -if [ ! -f set_event -o ! -d events/sched ]; then - echo "event tracing is not supported" - exit_unsupported -fi - echo 'sched:sched_switch' > set_event yield diff --git a/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc b/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc index f0f366f18d0c..e6eb78f0b954 100644 --- a/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc +++ b/tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event tracing - restricts events based on pid notrace filtering +# requires: set_event events/sched set_event_pid set_event_notrace_pid # flags: instance do_reset() { @@ -56,16 +57,6 @@ enable_events() { echo 1 > tracing_on } -if [ ! -f set_event -o ! -d events/sched ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f set_event_pid -o ! 
-f set_event_notrace_pid ]; then - echo "event pid notrace filtering is not supported" - exit_unsupported -fi - echo 0 > options/event-fork do_reset diff --git a/tools/testing/selftests/ftrace/test.d/event/event-pid.tc b/tools/testing/selftests/ftrace/test.d/event/event-pid.tc index f9cb214220b1..7f5f97dffdc3 100644 --- a/tools/testing/selftests/ftrace/test.d/event/event-pid.tc +++ b/tools/testing/selftests/ftrace/test.d/event/event-pid.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event tracing - restricts events based on pid +# requires: set_event set_event_pid events/sched # flags: instance do_reset() { @@ -16,16 +17,6 @@ fail() { #msg exit_fail } -if [ ! -f set_event -o ! -d events/sched ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f set_event_pid ]; then - echo "event pid filtering is not supported" - exit_unsupported -fi - echo 0 > options/event-fork echo 1 > events/sched/sched_switch/enable diff --git a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc index 83a8c571e93a..b1ede6249866 100644 --- a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc +++ b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event tracing - enable/disable with subsystem level files +# requires: set_event events/sched/enable # flags: instance do_reset() { @@ -13,11 +14,6 @@ fail() { #msg exit_fail } -if [ ! -f set_event -o ! -d events/sched ]; then - echo "event tracing is not supported" - exit_unsupported -fi - echo 'sched:*' > set_event yield diff --git a/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc b/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc index 84d7bda08d2a..93c10ea42a68 100644 --- a/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc +++ b/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event tracing - enable/disable with top level files +# requires: available_events set_event events/enable do_reset() { echo > set_event @@ -12,11 +13,6 @@ fail() { #msg exit_fail } -if [ ! -f available_events -o ! -f set_event -o ! -d events ]; then - echo "event tracing is not supported" - exit_unsupported -fi - echo '*:*' > set_event yield diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc index f59853857ad2..cf3ea42b12b0 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc @@ -1,17 +1,11 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: ftrace - function graph filters with stack tracer +# requires: stack_trace set_ftrace_filter function_graph:tracer # Make sure that function graph filtering works, and is not # affected by other tracers enabled (like stack tracer) -if ! grep -q function_graph available_tracers; then - echo "no function graph tracer configured" - exit_unsupported -fi - -check_filter_file set_ftrace_filter - do_reset() { if [ -e /proc/sys/kernel/stack_tracer_enabled ]; then echo 0 > /proc/sys/kernel/stack_tracer_enabled @@ -37,12 +31,6 @@ fi echo function_graph > current_tracer -if [ ! 
-f stack_trace ]; then - echo "Stack tracer not configured" - do_reset - exit_unsupported; -fi - echo "Now testing with stack tracer" echo 1 > /proc/sys/kernel/stack_tracer_enabled diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc index d610f47edd90..b3ccdaec2a61 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc @@ -1,16 +1,10 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: ftrace - function graph filters +# requires: set_ftrace_filter function_graph:tracer # Make sure that function graph filtering works -if ! grep -q function_graph available_tracers; then - echo "no function graph tracer configured" - exit_unsupported -fi - -check_filter_file set_ftrace_filter - fail() { # msg echo $1 exit_fail diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc index 28936f434ee5..4b994b6df5ac 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc @@ -1,16 +1,10 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: ftrace - function glob filters +# requires: set_ftrace_filter function:tracer # Make sure that function glob matching filter works. -if ! grep -q function available_tracers; then - echo "no function tracer configured" - exit_unsupported -fi - -check_filter_file set_ftrace_filter - disable_tracing clear_trace diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc index 71db68a7975f..acb17ce543d2 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc @@ -1,22 +1,11 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: ftrace - function pid notrace filters +# requires: set_ftrace_notrace_pid set_ftrace_filter function:tracer # flags: instance # Make sure that function pid matching filter with notrace works. -if ! grep -q function available_tracers; then - echo "no function tracer configured" - exit_unsupported -fi - -if [ ! -f set_ftrace_notrace_pid ]; then - echo "set_ftrace_notrace_pid not found? Is function tracer not set?" - exit_unsupported -fi - -check_filter_file set_ftrace_filter - do_function_fork=1 if [ ! -f options/function-fork ]; then diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc index d58403c4b7cd..9f0a9687c773 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc @@ -1,23 +1,12 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: ftrace - function pid filters +# requires: set_ftrace_pid set_ftrace_filter function:tracer # flags: instance # Make sure that function pid matching filter works. # Also test it on an instance directory -if ! grep -q function available_tracers; then - echo "no function tracer configured" - exit_unsupported -fi - -if [ ! -f set_ftrace_pid ]; then - echo "set_ftrace_pid not found? Is function tracer not set?" - exit_unsupported -fi - -check_filter_file set_ftrace_filter - do_function_fork=1 if [ ! 
-f options/function-fork ]; then diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc index b2aff786c1a2..0f41e441c203 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc @@ -1,10 +1,9 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: ftrace - stacktrace filter command +# requires: set_ftrace_filter # flags: instance -check_filter_file set_ftrace_filter - echo _do_fork:stacktrace >> set_ftrace_filter grep -q "_do_fork:stacktrace:unlimited" set_ftrace_filter diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc index 71fa3f49e35e..0c6cf7725110 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: ftrace - function trace with cpumask +# requires: function:tracer if ! which nproc ; then nproc() { @@ -15,11 +16,6 @@ if [ $NP -eq 1 ] ;then exit_unresolved fi -if ! grep -q "function" available_tracers ; then - echo "Function trace is not enabled" - exit_unsupported -fi - ORIG_CPUMASK=`cat tracing_cpumask` do_reset() { diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc index e9b1fd534e96..3145b0f1835c 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc @@ -3,15 +3,14 @@ # description: ftrace - test for function event triggers # flags: instance # +# The triggers are set within the set_ftrace_filter file +# requires: set_ftrace_filter +# # Ftrace allows to add triggers to functions, such as enabling or disabling # tracing, enabling or disabling trace events, or recording a stack trace # within the ring buffer. # # This test is designed to test event triggers -# - -# The triggers are set within the set_ftrace_filter file -check_filter_file set_ftrace_filter do_reset() { reset_ftrace_filter diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_mod_trace.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_mod_trace.tc index 1a4b4a442d33..37c8feb9078b 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_mod_trace.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_mod_trace.tc @@ -1,8 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: ftrace - function trace on module - -check_filter_file set_ftrace_filter +# requires: set_ftrace_filter : "mod: allows to filter a non exist function" echo 'non_exist_func:mod:non_exist_module' > set_ftrace_filter diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_profile_stat.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_profile_stat.tc index 0d501058aa75..4daeffb02fd8 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_profile_stat.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_profile_stat.tc @@ -1,8 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: ftrace - function profiling - -[ ! 
-f function_profile_enabled ] && exit_unsupported +# requires: function_profile_enabled : "Enable function profile" echo 1 > function_profile_enabled diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc index a3dadb6b93b4..1dbd766c0cd2 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: ftrace - function profiler with function tracing +# requires: function_profile_enabled set_ftrace_filter function_graph:tracer # There was a bug after a rewrite of the ftrace infrastructure that # caused the function_profiler not to be able to run with the function @@ -13,17 +14,6 @@ # This test triggers those bugs on those kernels. # # We need function_graph and profiling to to run this test -if ! grep -q function_graph available_tracers; then - echo "no function graph tracer configured" - exit_unsupported; -fi - -check_filter_file set_ftrace_filter - -if [ ! -f function_profile_enabled ]; then - echo "function_profile_enabled not found, function profiling enabled?" - exit_unsupported -fi fail() { # mesg echo $1 diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc index 70bad441fa7d..e96e279e0533 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc @@ -2,6 +2,9 @@ # SPDX-License-Identifier: GPL-2.0 # description: ftrace - test reading of set_ftrace_filter # +# The triggers are set within the set_ftrace_filter file +# requires: set_ftrace_filter +# # The set_ftrace_filter file of ftrace is used to list functions as well as # triggers (probes) attached to functions. The code to read this file is not # straight forward and has had various bugs in the past. This test is designed @@ -9,9 +12,6 @@ # file in various ways (cat vs dd). # -# The triggers are set within the set_ftrace_filter file -check_filter_file set_ftrace_filter - fail() { # mesg echo $1 exit_fail diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_stack_tracer.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_stack_tracer.tc index 51e9e80bc0e6..61264e422699 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_stack_tracer.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_stack_tracer.tc @@ -1,15 +1,9 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: ftrace - Max stack tracer +# requires: stack_trace stack_trace_filter # Test the basic function of max-stack usage tracing -if [ ! 
-f stack_trace ]; then - echo "Max stack tracer is not supported - please make CONFIG_STACK_TRACER=y" - exit_unsupported -fi - -check_filter_file stack_trace_filter - echo > stack_trace_filter echo 0 > stack_max_size echo 1 > /proc/sys/kernel/stack_tracer_enabled diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc index 3ed173f2944a..aee22289536b 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc @@ -3,6 +3,9 @@ # description: ftrace - test for function traceon/off triggers # flags: instance # +# The triggers are set within the set_ftrace_filter file +# requires: set_ftrace_filter +# # Ftrace allows to add triggers to functions, such as enabling or disabling # tracing, enabling or disabling trace events, or recording a stack trace # within the ring buffer. @@ -10,9 +13,6 @@ # This test is designed to test enabling and disabling tracing triggers # -# The triggers are set within the set_ftrace_filter file -check_filter_file set_ftrace_filter - fail() { # mesg echo $1 exit_fail diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc index 23465823532b..6c190620db47 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc @@ -1,21 +1,15 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: ftrace - test tracing error log support +# event tracing is currently the only ftrace tracer that uses the +# tracing error_log, hence this check +# requires: set_event error_log fail() { #msg echo $1 exit_fail } -# event tracing is currently the only ftrace tracer that uses the -# tracing error_log, hence this check -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -[ -f error_log ] || exit_unsupported - ftrace_errlog_check 'event filter parse error' '((sig >= 10 && sig < 15) || dsig ^== 17) && comm != bash' 'events/signal/signal_generate/filter' exit 0 diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions index 697c77ef2e2b..c5dec55b7d95 100644 --- a/tools/testing/selftests/ftrace/test.d/functions +++ b/tools/testing/selftests/ftrace/test.d/functions @@ -1,10 +1,3 @@ -check_filter_file() { # check filter file introduced by dynamic ftrace - if [ ! -f "$1" ]; then - echo "$1 not found? Is dynamic ftrace not set?" - exit_unsupported - fi -} - clear_trace() { # reset trace output echo > trace } @@ -113,6 +106,27 @@ initialize_ftrace() { # Reset ftrace to initial-state enable_tracing } +check_requires() { # Check required files and tracers + for i in "$@" ; do + r=${i%:README} + t=${i%:tracer} + if [ $t != $i ]; then + if ! grep -wq $t available_tracers ; then + echo "Required tracer $t is not configured." + exit_unsupported + fi + elif [ $r != $i ]; then + if ! grep -Fq "$r" README ; then + echo "Required feature pattern \"$r\" is not in README." + exit_unsupported + fi + elif [ ! -e $i ]; then + echo "Required feature interface $i doesn't exist." 
+ exit_unsupported + fi + done +} + LOCALHOST=127.0.0.1 yield() { diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc index 4fa0f79144f4..0eb47fbb3f44 100644 --- a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc +++ b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc @@ -1,11 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Test creation and deletion of trace instances while setting an event - -if [ ! -d instances ] ; then - echo "no instance directory with this kernel" - exit_unsupported; -fi +# requires: instances fail() { # mesg rmdir foo 2>/dev/null diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance.tc b/tools/testing/selftests/ftrace/test.d/instances/instance.tc index b84651283bf3..607521d2592b 100644 --- a/tools/testing/selftests/ftrace/test.d/instances/instance.tc +++ b/tools/testing/selftests/ftrace/test.d/instances/instance.tc @@ -1,11 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Test creation and deletion of trace instances - -if [ ! -d instances ] ; then - echo "no instance directory with this kernel" - exit_unsupported; -fi +# requires: instances fail() { # mesg rmdir x y z 2>/dev/null diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc b/tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc index bb1eb5a7c64e..eba858c21815 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc @@ -1,8 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe dynamic event - adding and removing - -[ -f kprobe_events ] || exit_unsupported # this is configurable +# requires: kprobe_events echo p:myevent _do_fork > kprobe_events grep myevent kprobe_events diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc b/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc index 442c1a8c5edf..d10bf4f05bc8 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc @@ -1,8 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe dynamic event - busy event check - -[ -f kprobe_events ] || exit_unsupported +# requires: kprobe_events echo p:myevent _do_fork > kprobe_events test -d events/kprobes/myevent diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc index bcdecf80a8f1..61f2ac441aec 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc @@ -1,8 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe dynamic event with arguments - -[ -f kprobe_events ] || exit_unsupported # this is configurable +# requires: kprobe_events echo 'p:testprobe _do_fork $stack $stack0 +0($stack)' > kprobe_events grep testprobe kprobe_events | grep -q 'arg1=\$stack arg2=\$stack0 arg3=+0(\$stack)' diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_comm.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_comm.tc index 15c1f70fcaf9..05aaeed6987f 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_comm.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_comm.tc @@ -1,8 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe event with 
comm arguments - -[ -f kprobe_events ] || exit_unsupported # this is configurable +# requires: kprobe_events grep -A1 "fetcharg:" README | grep -q "\$comm" || exit_unsupported # this is too old diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc index 46e7744f8358..b5fa05443b39 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc @@ -1,8 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe event string type argument - -[ -f kprobe_events ] || exit_unsupported # this is configurable +# requires: kprobe_events case `uname -m` in x86_64) diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_symbol.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_symbol.tc index 2b6dd33f9076..b8c75a3d003c 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_symbol.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_symbol.tc @@ -1,8 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe event symbol argument - -[ -f kprobe_events ] || exit_unsupported # this is configurable +# requires: kprobe_events SYMBOL="linux_proc_banner" diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc index 6f0f19953193..474ca1a9a088 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc @@ -1,10 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe event argument syntax - -[ -f kprobe_events ] || exit_unsupported # this is configurable - -grep "x8/16/32/64" README > /dev/null || exit_unsupported # version issue +# requires: kprobe_events "x8/16/32/64":README PROBEFUNC="vfs_read" GOODREG= diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc index 81490ecaaa92..0610e0b5587c 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc @@ -1,10 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobes event arguments with types - -[ -f kprobe_events ] || exit_unsupported # this is configurable - -grep "x8/16/32/64" README > /dev/null || exit_unsupported # version issue +# requires: kprobe_events "x8/16/32/64":README gen_event() { # Bitsize echo "p:testprobe _do_fork \$stack0:s$1 \$stack0:u$1 \$stack0:x$1 \$stack0:b4@4/$1" diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc index 0f60087583d8..a30a9c07290d 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc @@ -1,10 +1,8 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe event user-memory access +# requires: kprobe_events '$arg':README -[ -f kprobe_events ] || exit_unsupported # this is configurable - -grep -q '\$arg' README || exit_unresolved # depends on arch grep -A10 "fetcharg:" README | grep -q 'ustring' || exit_unsupported grep -A10 "fetcharg:" README | grep -q '\[u\]' || exit_unsupported diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc 
b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc index 3ff236719b6e..1f6981ef7afa 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc @@ -1,8 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe event auto/manual naming - -[ -f kprobe_events ] || exit_unsupported # this is configurable +# requires: kprobe_events :;: "Add an event on function without name" ;: diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc index df5072815b87..81d8b58c03bc 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc @@ -1,11 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe dynamic event with function tracer - -[ -f kprobe_events ] || exit_unsupported # this is configurable -grep "function" available_tracers || exit_unsupported # this is configurable - -check_filter_file set_ftrace_filter +# requires: kprobe_events stack_trace_filter function:tracer # prepare echo nop > current_tracer diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_module.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_module.tc index d861bd776c5e..7e74ee11edf9 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_module.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_module.tc @@ -1,8 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe dynamic event - probing module - -[ -f kprobe_events ] || exit_unsupported # this is configurable +# requires: kprobe_events rmmod trace-printk ||: if ! modprobe trace-printk ; then diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_multiprobe.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_multiprobe.tc index 44494bac86d1..366b7e1b6718 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_multiprobe.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_multiprobe.tc @@ -1,10 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Create/delete multiprobe on kprobe event - -[ -f kprobe_events ] || exit_unsupported - -grep -q "Create/append/" README || exit_unsupported +# requires: kprobe_events "Create/append/":README # Choose 2 symbols for target SYM1=_do_fork diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc index eb0f4ab4e070..b4d834675e59 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc @@ -1,10 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe event parser error log check - -[ -f kprobe_events ] || exit_unsupported # this is configurable - -[ -f error_log ] || exit_unsupported +# requires: kprobe_events error_log check_error() { # command-with-error-pos-by-^ ftrace_errlog_check 'trace_kprobe' "$1" 'kprobe_events' diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc index ac9ab4a12e53..523fde6d1aa5 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc @@ -1,8 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kretprobe dynamic 
event with arguments - -[ -f kprobe_events ] || exit_unsupported # this is configurable +# requires: kprobe_events # Add new kretprobe event echo 'r:testprobe2 _do_fork $retval' > kprobe_events diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc index 8e05b178519a..4f0b268c1233 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc @@ -1,9 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kretprobe dynamic event with maxactive - -[ -f kprobe_events ] || exit_unsupported # this is configurable -grep -q 'r\[maxactive\]' README || exit_unsupported # this is older version +# requires: kprobe_events 'r[maxactive]':README # Test if we successfully reject unknown messages if echo 'a:myprobeaccept inet_csk_accept' > kprobe_events; then false; else true; fi diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc index 6e3dbe5f96b7..312d23780096 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc @@ -1,8 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Register/unregister many kprobe events - -[ -f kprobe_events ] || exit_unsupported # this is configurable +# requires: kprobe_events # ftrace fentry skip size depends on the machine architecture. # Currently HAVE_KPROBES_ON_FTRACE defined on x86 and powerpc64le diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc index a902aa0aaabc..624269c8d534 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc @@ -1,8 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe events - probe points - -[ -f kprobe_events ] || exit_unsupported # this is configurable +# requires: kprobe_events TARGET_FUNC=tracefs_create_dir diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc b/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc index 0384b525cdee..ff6c44adc8a0 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc @@ -1,8 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Kprobe dynamic event - adding and removing - -[ -f kprobe_events ] || exit_unsupported # this is configurable +# requires: kprobe_events ! 
grep -q 'myevent' kprobe_profile echo p:myevent _do_fork > kprobe_events diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc index 14229d5778a0..7b5b60c3c5a2 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc @@ -1,10 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Uprobe event parser error log check - -[ -f uprobe_events ] || exit_unsupported # this is configurable - -[ -f error_log ] || exit_unsupported +# requires: uprobe_events error_log check_error() { # command-with-error-pos-by-^ ftrace_errlog_check 'trace_uprobe' "$1" 'uprobe_events' diff --git a/tools/testing/selftests/ftrace/test.d/preemptirq/irqsoff_tracer.tc b/tools/testing/selftests/ftrace/test.d/preemptirq/irqsoff_tracer.tc index 2b82c80edf69..22bff122b933 100644 --- a/tools/testing/selftests/ftrace/test.d/preemptirq/irqsoff_tracer.tc +++ b/tools/testing/selftests/ftrace/test.d/preemptirq/irqsoff_tracer.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: test for the preemptirqsoff tracer +# requires: preemptoff:tracer irqsoff:tracer MOD=preemptirq_delay_test @@ -27,9 +28,6 @@ unres() { #msg modprobe $MOD || unres "$MOD module not available" rmmod $MOD -grep -q "preemptoff" available_tracers || unsup "preemptoff tracer not enabled" -grep -q "irqsoff" available_tracers || unsup "irqsoff tracer not enabled" - reset_tracer # Simulate preemptoff section for half a second couple of times diff --git a/tools/testing/selftests/ftrace/test.d/template b/tools/testing/selftests/ftrace/test.d/template index e1a5d14c4eaf..2cd8947edf72 100644 --- a/tools/testing/selftests/ftrace/test.d/template +++ b/tools/testing/selftests/ftrace/test.d/template @@ -1,6 +1,10 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: %HERE DESCRIBE WHAT THIS DOES% +# requires: %HERE LIST THE REQUIRED FILES, TRACERS OR README-STRINGS% +# The required tracer needs :tracer suffix, e.g. function:tracer +# The required README string needs :README suffix, e.g. "x8/16/32/64":README +# and the README string is treated as a fixed-string instead of regexp pattern. # you have to add ".tc" extention for your testcase file # Note that all tests are run with "errexit" option. diff --git a/tools/testing/selftests/ftrace/test.d/tracer/wakeup.tc b/tools/testing/selftests/ftrace/test.d/tracer/wakeup.tc index b0893d7edda3..11be10e1bf96 100644 --- a/tools/testing/selftests/ftrace/test.d/tracer/wakeup.tc +++ b/tools/testing/selftests/ftrace/test.d/tracer/wakeup.tc @@ -1,17 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Test wakeup tracer +# requires: wakeup:tracer if ! which chrt ; then echo "chrt is not found. This test requires nice command." exit_unresolved fi -if ! grep -wq "wakeup" available_tracers ; then - echo "wakeup tracer is not supported" - exit_unsupported -fi - echo wakeup > current_tracer echo 1 > tracing_on echo 0 > tracing_max_latency diff --git a/tools/testing/selftests/ftrace/test.d/tracer/wakeup_rt.tc b/tools/testing/selftests/ftrace/test.d/tracer/wakeup_rt.tc index b9b6669a623b..3a77198b3c69 100644 --- a/tools/testing/selftests/ftrace/test.d/tracer/wakeup_rt.tc +++ b/tools/testing/selftests/ftrace/test.d/tracer/wakeup_rt.tc @@ -1,17 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Test wakeup RT tracer +# requires: wakeup_rt:tracer if ! 
which chrt ; then echo "chrt is not found. This test requires chrt command." exit_unresolved fi -if ! grep -wq "wakeup_rt" available_tracers ; then - echo "wakeup_rt tracer is not supported" - exit_unsupported -fi - echo wakeup_rt > current_tracer echo 1 > tracing_on echo 0 > tracing_max_latency diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc index 3f2aee115f6e..1590d6bfb857 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc @@ -1,24 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test inter-event histogram trigger expected fail actions +# requires: set_event snapshot "snapshot()":README fail() { #msg echo $1 exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f snapshot ]; then - echo "snapshot is not supported" - exit_unsupported -fi - -grep -q "snapshot()" README || exit_unsupported # version issue - echo "Test expected snapshot action failure" echo 'hist:keys=comm:onmatch(sched.sched_wakeup).snapshot()' >> events/sched/sched_waking/trigger && exit_fail diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc index e232059a8ab2..41119e0440e9 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc @@ -1,27 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test field variable support +# requires: set_event synthetic_events events/sched/sched_process_fork/hist fail() { #msg echo $1 exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f synthetic_events ]; then - echo "synthetic event is not supported" - exit_unsupported -fi - -if [ ! -f events/sched/sched_process_fork/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - echo "Test field variable support" echo 'wakeup_latency u64 lat; pid_t pid; int prio; char comm[16]' > synthetic_events diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc index 07cfcb8157b6..7449a4b8f1f9 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc @@ -1,27 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test inter-event combined histogram trigger +# requires: set_event synthetic_events events/sched/sched_process_fork/hist fail() { #msg echo $1 exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f synthetic_events ]; then - echo "synthetic event is not supported" - exit_unsupported -fi - -if [ ! 
-f events/sched/sched_process_fork/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - echo "Test create synthetic event" echo 'waking_latency u64 lat pid_t pid' > synthetic_events diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc index 73e413c2ca26..3ad6e3fd8ac9 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc @@ -1,27 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test multiple actions on hist trigger +# requires: set_event synthetic_events events/sched/sched_process_fork/hist fail() { #msg echo $1 exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f synthetic_events ]; then - echo "synthetic event is not supported" - exit_unsupported -fi - -if [ ! -f events/sched/sched_process_fork/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - echo "Test multiple actions on hist trigger" echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events TRIGGER1=events/sched/sched_wakeup/trigger diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc index c80007aa9f86..adaabb873ed4 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc @@ -1,19 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test inter-event histogram trigger onchange action +# requires: set_event "onchange(var)":README fail() { #msg echo $1 exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -grep -q "onchange(var)" README || exit_unsupported # version issue - echo "Test onchange action" echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio) if comm=="ping"' >> events/sched/sched_waking/trigger diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc index ebe0ad827f9f..20e39471052e 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc @@ -1,27 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test inter-event histogram trigger onmatch action +# requires: set_event synthetic_events events/sched/sched_process_fork/hist fail() { #msg echo $1 exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f synthetic_events ]; then - echo "synthetic event is not supported" - exit_unsupported -fi - -if [ ! 
-f events/sched/sched_process_fork/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - echo "Test create synthetic event" echo 'wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc index 2a2ef767249e..f4b03ab7c287 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc @@ -1,27 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test inter-event histogram trigger onmatch-onmax action +# requires: set_event synthetic_events events/sched/sched_process_fork/hist fail() { #msg echo $1 exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f synthetic_events ]; then - echo "synthetic event is not supported" - exit_unsupported -fi - -if [ ! -f events/sched/sched_process_fork/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - echo "Test create synthetic event" echo 'wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc index 98d73bfb0296..71c9b5911c70 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc @@ -1,27 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test inter-event histogram trigger onmax action +# requires: set_event synthetic_events events/sched/sched_process_fork/hist fail() { #msg echo $1 exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f synthetic_events ]; then - echo "synthetic event is not supported" - exit_unsupported -fi - -if [ ! -f events/sched/sched_process_fork/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - echo "Test create synthetic event" echo 'wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc index 01b01b9c4e07..67fa328b830f 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc @@ -1,31 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test inter-event histogram trigger snapshot action +# requires: set_event snapshot events/sched/sched_process_fork/hist "onchange(var)":README "snapshot()":README fail() { #msg echo $1 exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f events/sched/sched_process_fork/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - -if [ ! 
-f snapshot ]; then - echo "snapshot is not supported" - exit_unsupported -fi - -grep -q "onchange(var)" README || exit_unsupported # version issue - -grep -q "snapshot()" README || exit_unsupported # version issue - echo "Test snapshot action" echo 1 > events/sched/enable diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc index df44b14724a4..a152b558b40a 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc @@ -1,22 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test synthetic event create remove +# requires: set_event synthetic_events fail() { #msg echo $1 exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f synthetic_events ]; then - echo "synthetic event is not supported" - exit_unsupported -fi - echo "Test create synthetic event" echo 'wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc index 88e6c3f43006..59216f3cfb12 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test synthetic_events syntax parser +# requires: set_event synthetic_events do_reset() { reset_trigger @@ -14,16 +15,6 @@ fail() { #msg exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f synthetic_events ]; then - echo "synthetic event is not supported" - exit_unsupported -fi - reset_tracer do_reset diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc index c3baa486aeb4..c126d2350a6d 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc @@ -1,29 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test inter-event histogram trigger trace action +# requires: set_event synthetic_events events/sched/sched_process_fork/hist "trace(":README fail() { #msg echo $1 exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f synthetic_events ]; then - echo "synthetic event is not supported" - exit_unsupported -fi - -if [ ! 
-f events/sched/sched_process_fork/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - -grep -q "trace(" README || exit_unsupported # version issue - echo "Test create synthetic event" echo 'wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc index eddb51e1fbf7..c226acee74bf 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test event enable/disable trigger +# requires: set_event events/sched/sched_process_fork/trigger # flags: instance fail() { #msg @@ -8,16 +9,6 @@ fail() { #msg exit_fail } -if [ ! -f set_event -o ! -d events/sched ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f events/sched/sched_process_fork/trigger ]; then - echo "event trigger is not supported" - exit_unsupported -fi - FEATURE=`grep enable_event events/sched/sched_process_fork/trigger` if [ -z "$FEATURE" ]; then echo "event enable/disable trigger is not supported" diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc index 2dcc2296ebdd..d9a198cb0f81 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test trigger filter +# requires: set_event events/sched/sched_process_fork/trigger # flags: instance fail() { #msg @@ -8,16 +9,6 @@ fail() { #msg exit_fail } -if [ ! -f set_event -o ! -d events/sched ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f events/sched/sched_process_fork/trigger ]; then - echo "event trigger is not supported" - exit_unsupported -fi - echo "Test trigger filter" echo 1 > tracing_on echo 'traceoff if child_pid == 0' > events/sched/sched_process_fork/trigger diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc index fab4431639d3..4562e13cb26b 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test histogram modifiers +# requires: set_event events/sched/sched_process_fork/trigger events/sched/sched_process_fork/hist # flags: instance fail() { #msg @@ -8,21 +9,6 @@ fail() { #msg exit_fail } -if [ ! -f set_event -o ! -d events/sched ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f events/sched/sched_process_fork/trigger ]; then - echo "event trigger is not supported" - exit_unsupported -fi - -if [ ! 
-f events/sched/sched_process_fork/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - echo "Test histogram with execname modifier" echo 'hist:keys=common_pid.execname' > events/sched/sched_process_fork/trigger diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc index d44087a2f3d1..52cfe7828e8a 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc @@ -1,23 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test histogram parser errors - -if [ ! -f set_event -o ! -d events/kmem ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f events/kmem/kmalloc/trigger ]; then - echo "event trigger is not supported" - exit_unsupported -fi - -if [ ! -f events/kmem/kmalloc/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - -[ -f error_log ] || exit_unsupported +# requires: set_event events/kmem/kmalloc/trigger events/kmem/kmalloc/hist error_log check_error() { # command-with-error-pos-by-^ ftrace_errlog_check 'hist:kmem:kmalloc' "$1" 'events/kmem/kmalloc/trigger' diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc index 177e8d4c4744..2950bfbc6fce 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test histogram trigger +# requires: set_event events/sched/sched_process_fork/trigger events/sched/sched_process_fork/hist # flags: instance fail() { #msg @@ -8,22 +9,7 @@ fail() { #msg exit_fail } -if [ ! -f set_event -o ! -d events/sched ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f events/sched/sched_process_fork/trigger ]; then - echo "event trigger is not supported" - exit_unsupported -fi - -if [ ! -f events/sched/sched_process_fork/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - -echo "Test histogram basic tigger" +echo "Test histogram basic trigger" echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc index 68ff3f45c720..7129b52da947 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test multiple histogram triggers +# requires: set_event events/sched/sched_process_fork/trigger events/sched/sched_process_fork/hist # flags: instance fail() { #msg @@ -8,21 +9,6 @@ fail() { #msg exit_fail } -if [ ! -f set_event -o ! -d events/sched ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f events/sched/sched_process_fork/trigger ]; then - echo "event trigger is not supported" - exit_unsupported -fi - -if [ ! 
-f events/sched/sched_process_fork/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - echo "Test histogram multiple triggers" echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc index ac738500d17f..33f5bdee387f 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc @@ -1,27 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test snapshot-trigger +# requires: set_event events/sched/sched_process_fork/trigger snapshot fail() { #msg echo $1 exit_fail } -if [ ! -f set_event -o ! -d events/sched ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f events/sched/sched_process_fork/trigger ]; then - echo "event trigger is not supported" - exit_unsupported -fi - -if [ ! -f snapshot ]; then - echo "snapshot is not supported" - exit_unsupported -fi - FEATURE=`grep snapshot events/sched/sched_process_fork/trigger` if [ -z "$FEATURE" ]; then echo "snapshot trigger is not supported" diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc index 398c05c4d2a7..320ea9b3c6cd 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc @@ -1,29 +1,20 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test stacktrace-trigger +# requires: set_event events/sched/sched_process_fork/trigger fail() { #msg echo $1 exit_fail } -if [ ! -f set_event -o ! -d events/sched ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f events/sched/sched_process_fork/trigger ]; then - echo "event trigger is not supported" - exit_unsupported -fi - FEATURE=`grep stacktrace events/sched/sched_process_fork/trigger` if [ -z "$FEATURE" ]; then echo "stacktrace trigger is not supported" exit_unsupported fi -echo "Test stacktrace tigger" +echo "Test stacktrace trigger" echo 0 > trace echo 0 > options/stacktrace echo 'stacktrace' > events/sched/sched_process_fork/trigger diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-hist.tc index ab6bedb25736..68f3af9d9910 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-hist.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: trace_marker trigger - test histogram trigger +# requires: set_event events/ftrace/print/trigger events/ftrace/print/hist # flags: instance fail() { #msg @@ -8,27 +9,7 @@ fail() { #msg exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -d events/ftrace/print ]; then - echo "event trace_marker is not supported" - exit_unsupported -fi - -if [ ! -f events/ftrace/print/trigger ]; then - echo "event trigger is not supported" - exit_unsupported -fi - -if [ ! 
-f events/ftrace/print/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - -echo "Test histogram trace_marker tigger" +echo "Test histogram trace_marker trigger" echo 'hist:keys=common_pid' > events/ftrace/print/trigger for i in `seq 1 10` ; do echo "hello" > trace_marker; done diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-snapshot.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-snapshot.tc index df246e505af7..27da2dba9b9e 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-snapshot.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-snapshot.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: trace_marker trigger - test snapshot trigger +# requires: set_event snapshot events/ftrace/print/trigger # flags: instance fail() { #msg @@ -8,26 +9,6 @@ fail() { #msg exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f snapshot ]; then - echo "snapshot is not supported" - exit_unsupported -fi - -if [ ! -d events/ftrace/print ]; then - echo "event trace_marker is not supported" - exit_unsupported -fi - -if [ ! -f events/ftrace/print/trigger ]; then - echo "event trigger is not supported" - exit_unsupported -fi - test_trace() { file=$1 x=$2 @@ -46,7 +27,7 @@ test_trace() { done } -echo "Test snapshot trace_marker tigger" +echo "Test snapshot trace_marker trigger" echo 'snapshot' > events/ftrace/print/trigger diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-synthetic-kernel.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-synthetic-kernel.tc index 18b4d1c2807e..531139f41e94 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-synthetic-kernel.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-synthetic-kernel.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: trace_marker trigger - test histogram with synthetic event against kernel event +# requires: set_event synthetic_events events/sched/sched_waking events/ftrace/print/trigger events/ftrace/print/hist # flags: fail() { #msg @@ -8,36 +9,6 @@ fail() { #msg exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f synthetic_events ]; then - echo "synthetic events not supported" - exit_unsupported -fi - -if [ ! -d events/ftrace/print ]; then - echo "event trace_marker is not supported" - exit_unsupported -fi - -if [ ! -d events/sched/sched_waking ]; then - echo "event sched_waking is not supported" - exit_unsupported -fi - -if [ ! -f events/ftrace/print/trigger ]; then - echo "event trigger is not supported" - exit_unsupported -fi - -if [ ! 
-f events/ftrace/print/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - echo "Test histogram kernel event to trace_marker latency histogram trigger" echo 'latency u64 lat' > synthetic_events diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-synthetic.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-synthetic.tc index dd262d6d0db6..cc99cbb06d5e 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-synthetic.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-synthetic.tc @@ -1,6 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: trace_marker trigger - test histogram with synthetic event +# requires: set_event synthetic_events events/ftrace/print/trigger events/ftrace/print/hist # flags: fail() { #msg @@ -8,31 +9,6 @@ fail() { #msg exit_fail } -if [ ! -f set_event ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! -f synthetic_events ]; then - echo "synthetic events not supported" - exit_unsupported -fi - -if [ ! -d events/ftrace/print ]; then - echo "event trace_marker is not supported" - exit_unsupported -fi - -if [ ! -f events/ftrace/print/trigger ]; then - echo "event trigger is not supported" - exit_unsupported -fi - -if [ ! -f events/ftrace/print/hist ]; then - echo "hist trigger is not supported" - exit_unsupported -fi - echo "Test histogram trace_marker to trace_marker latency histogram trigger" echo 'latency u64 lat' > synthetic_events diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc index d5d2dcbc9cab..9ca04678f4da 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc @@ -1,22 +1,13 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test traceon/off trigger +# requires: set_event events/sched/sched_process_fork/trigger fail() { #msg echo $1 exit_fail } -if [ ! -f set_event -o ! -d events/sched ]; then - echo "event tracing is not supported" - exit_unsupported -fi - -if [ ! 
-f events/sched/sched_process_fork/trigger ]; then - echo "event trigger is not supported" - exit_unsupported -fi - echo "Test traceoff trigger" echo 1 > tracing_on echo 'traceoff' > events/sched/sched_process_fork/trigger diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c index 383bac05ac32..ceaad78e9667 100644 --- a/tools/testing/selftests/net/so_txtime.c +++ b/tools/testing/selftests/net/so_txtime.c @@ -15,8 +15,9 @@ #include #include #include +#include #include -#include +#include #include #include #include @@ -140,8 +141,8 @@ static void do_recv_errqueue_timeout(int fdt) { char control[CMSG_SPACE(sizeof(struct sock_extended_err)) + CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0}; - char data[sizeof(struct ipv6hdr) + - sizeof(struct tcphdr) + 1]; + char data[sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + + sizeof(struct udphdr) + 1]; struct sock_extended_err *err; struct msghdr msg = {0}; struct iovec iov = {0}; @@ -159,6 +160,8 @@ static void do_recv_errqueue_timeout(int fdt) msg.msg_controllen = sizeof(control); while (1) { + const char *reason; + ret = recvmsg(fdt, &msg, MSG_ERRQUEUE); if (ret == -1 && errno == EAGAIN) break; @@ -176,14 +179,30 @@ static void do_recv_errqueue_timeout(int fdt) err = (struct sock_extended_err *)CMSG_DATA(cm); if (err->ee_origin != SO_EE_ORIGIN_TXTIME) error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin); - if (err->ee_code != ECANCELED) - error(1, 0, "errqueue: code 0x%x\n", err->ee_code); + + switch (err->ee_errno) { + case ECANCELED: + if (err->ee_code != SO_EE_CODE_TXTIME_MISSED) + error(1, 0, "errqueue: unknown ECANCELED %u\n", + err->ee_code); + reason = "missed txtime"; + break; + case EINVAL: + if (err->ee_code != SO_EE_CODE_TXTIME_INVALID_PARAM) + error(1, 0, "errqueue: unknown EINVAL %u\n", + err->ee_code); + reason = "invalid txtime"; + break; + default: + error(1, 0, "errqueue: errno %u code %u\n", + err->ee_errno, err->ee_code); + }; tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info; tstamp -= (int64_t) glob_tstart; tstamp /= 1000 * 1000; - fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped\n", - data[ret - 1], tstamp); + fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped: %s\n", + data[ret - 1], tstamp, reason); msg.msg_flags = 0; msg.msg_controllen = sizeof(control); diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile index 9c0f758310fe..a179f0dca8ce 100644 --- a/tools/testing/selftests/netfilter/Makefile +++ b/tools/testing/selftests/netfilter/Makefile @@ -3,7 +3,7 @@ TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \ conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \ - nft_concat_range.sh \ + nft_concat_range.sh nft_conntrack_helper.sh \ nft_queue.sh LDLIBS = -lmnl diff --git a/tools/testing/selftests/netfilter/nft_conntrack_helper.sh b/tools/testing/selftests/netfilter/nft_conntrack_helper.sh new file mode 100755 index 000000000000..edf0a48da6bf --- /dev/null +++ b/tools/testing/selftests/netfilter/nft_conntrack_helper.sh @@ -0,0 +1,175 @@ +#!/bin/bash +# +# This tests connection tracking helper assignment: +# 1. can attach ftp helper to a connection from nft ruleset. +# 2. auto-assign still works. +# +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 +ret=0 + +sfx=$(mktemp -u "XXXXXXXX") +ns1="ns1-$sfx" +ns2="ns2-$sfx" +testipv6=1 + +cleanup() +{ + ip netns del ${ns1} + ip netns del ${ns2} +} + +nft --version > /dev/null 2>&1 +if [ $? 
-ne 0 ];then + echo "SKIP: Could not run test without nft tool" + exit $ksft_skip +fi + +ip -Version > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: Could not run test without ip tool" + exit $ksft_skip +fi + +conntrack -V > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: Could not run test without conntrack tool" + exit $ksft_skip +fi + +which nc >/dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: Could not run test without netcat tool" + exit $ksft_skip +fi + +trap cleanup EXIT + +ip netns add ${ns1} +ip netns add ${ns2} + +ip link add veth0 netns ${ns1} type veth peer name veth0 netns ${ns2} > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: No virtual ethernet pair device support in kernel" + exit $ksft_skip +fi + +ip -net ${ns1} link set lo up +ip -net ${ns1} link set veth0 up + +ip -net ${ns2} link set lo up +ip -net ${ns2} link set veth0 up + +ip -net ${ns1} addr add 10.0.1.1/24 dev veth0 +ip -net ${ns1} addr add dead:1::1/64 dev veth0 + +ip -net ${ns2} addr add 10.0.1.2/24 dev veth0 +ip -net ${ns2} addr add dead:1::2/64 dev veth0 + +load_ruleset_family() { + local family=$1 + local ns=$2 + +ip netns exec ${ns} nft -f - < /dev/null |grep -q 'helper=ftp' + if [ $? -ne 0 ] ; then + echo "FAIL: ${netns} did not show attached helper $message" 1>&2 + ret=1 + fi + + echo "PASS: ${netns} connection on port $port has ftp helper attached" 1>&2 + return 0 +} + +test_helper() +{ + local port=$1 + local msg=$2 + + sleep 3 | ip netns exec ${ns2} nc -w 2 -l -p $port > /dev/null & + + sleep 1 + sleep 1 | ip netns exec ${ns1} nc -w 2 10.0.1.2 $port > /dev/null & + + check_for_helper "$ns1" "ip $msg" $port + check_for_helper "$ns2" "ip $msg" $port + + wait + + if [ $testipv6 -eq 0 ] ;then + return 0 + fi + + ip netns exec ${ns1} conntrack -F 2> /dev/null + ip netns exec ${ns2} conntrack -F 2> /dev/null + + sleep 3 | ip netns exec ${ns2} nc -w 2 -6 -l -p $port > /dev/null & + + sleep 1 + sleep 1 | ip netns exec ${ns1} nc -w 2 -6 dead:1::2 $port > /dev/null & + + check_for_helper "$ns1" "ipv6 $msg" $port + check_for_helper "$ns2" "ipv6 $msg" $port + + wait +} + +load_ruleset_family ip ${ns1} +if [ $? -ne 0 ];then + echo "FAIL: ${ns1} cannot load ip ruleset" 1>&2 + exit 1 +fi + +load_ruleset_family ip6 ${ns1} +if [ $? -ne 0 ];then + echo "SKIP: ${ns1} cannot load ip6 ruleset" 1>&2 + testipv6=0 +fi + +load_ruleset_family inet ${ns2} +if [ $? -ne 0 ];then + echo "SKIP: ${ns1} cannot load inet ruleset" 1>&2 + load_ruleset_family ip ${ns2} + if [ $? -ne 0 ];then + echo "FAIL: ${ns2} cannot load ip ruleset" 1>&2 + exit 1 + fi + + if [ $testipv6 -eq 1 ] ;then + load_ruleset_family ip6 ${ns2} + if [ $? 
-ne 0 ];then + echo "FAIL: ${ns2} cannot load ip6 ruleset" 1>&2 + exit 1 + fi + fi +fi + +test_helper 2121 "set via ruleset" +ip netns exec ${ns1} sysctl -q 'net.netfilter.nf_conntrack_helper=1' +ip netns exec ${ns2} sysctl -q 'net.netfilter.nf_conntrack_helper=1' +test_helper 21 "auto-assign" + +exit $ret diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h index c1921a53dbed..8d728eda783d 100644 --- a/tools/testing/selftests/pidfd/pidfd.h +++ b/tools/testing/selftests/pidfd/pidfd.h @@ -95,4 +95,9 @@ static inline int sys_pidfd_getfd(int pidfd, int fd, int flags) return syscall(__NR_pidfd_getfd, pidfd, fd, flags); } +static inline int sys_memfd_create(const char *name, unsigned int flags) +{ + return syscall(__NR_memfd_create, name, flags); +} + #endif /* __PIDFD_H */ diff --git a/tools/testing/selftests/pidfd/pidfd_getfd_test.c b/tools/testing/selftests/pidfd/pidfd_getfd_test.c index 401a7c1d0312..84b65ecccb04 100644 --- a/tools/testing/selftests/pidfd/pidfd_getfd_test.c +++ b/tools/testing/selftests/pidfd/pidfd_getfd_test.c @@ -34,11 +34,6 @@ static int sys_kcmp(pid_t pid1, pid_t pid2, int type, unsigned long idx1, return syscall(__NR_kcmp, pid1, pid2, type, idx1, idx2); } -static int sys_memfd_create(const char *name, unsigned int flags) -{ - return syscall(__NR_memfd_create, name, flags); -} - static int __child(int sk, int memfd) { int ret; diff --git a/tools/testing/selftests/pidfd/pidfd_setns_test.c b/tools/testing/selftests/pidfd/pidfd_setns_test.c index 133ec5b6cda8..9418108eae13 100644 --- a/tools/testing/selftests/pidfd/pidfd_setns_test.c +++ b/tools/testing/selftests/pidfd/pidfd_setns_test.c @@ -470,4 +470,16 @@ TEST_F(current_nsset, no_foul_play) } } +TEST(setns_einval) +{ + int fd; + + fd = sys_memfd_create("rostock", 0); + EXPECT_GT(fd, 0); + + ASSERT_NE(setns(fd, 0), 0); + EXPECT_EQ(errno, EINVAL); + close(fd); +} + TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile index ca35dd8848b0..af3df79d8163 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile +++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile @@ -7,7 +7,7 @@ noarg: # The EBB handler is 64-bit code and everything links against it CFLAGS += -m64 -TMPOUT = $(OUTPUT)/ +TMPOUT = $(OUTPUT)/TMPDIR/ # Toolchains may build PIE by default which breaks the assembly no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \ $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie) diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index c0aa46ce14f6..252140a52553 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -1615,6 +1615,7 @@ TEST_F(TRACE_poke, getpid_runs_normally) # define ARCH_REGS s390_regs # define SYSCALL_NUM gprs[2] # define SYSCALL_RET gprs[2] +# define SYSCALL_NUM_RET_SHARE_REG #elif defined(__mips__) # define ARCH_REGS struct pt_regs # define SYSCALL_NUM regs[2] diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json index 47a3082b6661..503982b8f295 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json @@ -260,10 +260,10 @@ 255 ] ], - "cmdUnderTest": "$TC action add action bpf bytecode '4,40 0 0 12,21 0 1 2054,6 0 0 262144,6 0 0 0' index 4294967296 cookie 
12345", + "cmdUnderTest": "$TC action add action bpf bytecode '4,40 0 0 12,21 0 1 2054,6 0 0 262144,6 0 0 0' index 4294967296 cookie 123456", "expExitCode": "255", "verifyCmd": "$TC action ls action bpf", - "matchPattern": "action order [0-9]*: bpf bytecode '4,40 0 0 12,21 0 1 2048,6 0 0 262144,6 0 0 0' default-action pipe.*cookie 12345", + "matchPattern": "action order [0-9]*: bpf bytecode '4,40 0 0 12,21 0 1 2048,6 0 0 262144,6 0 0 0' default-action pipe.*cookie 123456", "matchCount": "0", "teardown": [ "$TC action flush action bpf" diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json index 88ec134872e4..072febf25f55 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json @@ -469,7 +469,7 @@ 255 ] ], - "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie aaabbbcccdddeee \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"", + "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie 123456789abcde \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"", "expExitCode": "0", "verifyCmd": "$TC actions ls action csum", "matchPattern": "^[ \t]+index [0-9]* ref", @@ -492,7 +492,7 @@ 1, 255 ], - "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie aaabbbcccdddeee \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"" + "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie 123456789abcde \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"" ], "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"", "expExitCode": "0", diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json index fbeb9197697d..d06346968bcb 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json @@ -629,7 +629,7 @@ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022 index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action tunnel_key index 1", - "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022.*index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022.*index 1", "matchCount": "1", "teardown": [ "$TC actions flush action tunnel_key" @@ -653,7 +653,7 @@ "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344 index 1", "expExitCode": "0", "verifyCmd": "$TC actions get action tunnel_key index 1", - "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1", + "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action tunnel_key"
@@ -677,7 +677,7 @@
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 824212:80:00880022 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 824212:80:00880022.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 824212:80:00880022.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
@@ -701,7 +701,7 @@
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:4224:00880022 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:4224:00880022.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:4224:00880022.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
@@ -725,7 +725,7 @@
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:4288.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
@@ -749,7 +749,7 @@
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288428822 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288428822.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:4288428822.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
@@ -773,7 +773,7 @@
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42: index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022,0408:42:.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
@@ -818,12 +818,12 @@
                 1,
                 255
             ],
-            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 index 1 cookie aabbccddeeff112233445566778800a"
+            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 index 1 cookie 123456"
         ],
-        "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie a1b1c1d1",
+        "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie 123456",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie a1b1c1d1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie 123456",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action tunnel_key"
diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
index 17a1f53ceba0..d77f4829f1e0 100755
--- a/tools/testing/selftests/wireguard/netns.sh
+++ b/tools/testing/selftests/wireguard/netns.sh
@@ -587,9 +587,20 @@ ip0 link set wg0 up
 kill $ncat_pid
 ip0 link del wg0
 
+# Ensure there aren't circular reference loops
+ip1 link add wg1 type wireguard
+ip2 link add wg2 type wireguard
+ip1 link set wg1 netns $netns2
+ip2 link set wg2 netns $netns1
+pp ip netns delete $netns1
+pp ip netns delete $netns2
+pp ip netns add $netns1
+pp ip netns add $netns2
+
+sleep 2 # Wait for cleanup and grace periods
 declare -A objects
 while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
-	[[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ [0-9]+)\ .*(created|destroyed).* ]] || continue
+	[[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ ?[0-9]*)\ .*(created|destroyed).* ]] || continue
 	objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}"
 done < /dev/kmsg
 alldeleted=1
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 6683b4a70b05..caab980211a6 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -11,6 +11,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -135,10 +136,4 @@ static inline void free_page(unsigned long addr)
 	(void) (&_min1 == &_min2);		\
 	_min1 < _min2 ? _min1 : _min2; })
 
-/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
-#define list_add_tail(a, b) do {} while (0)
-#define list_del(a) do {} while (0)
-#define list_for_each_entry(a, b, c) while (0)
-/* end of stubs */
-
 #endif /* KERNEL_H */
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index b751350d4ce8..5d90254ddae4 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -11,12 +11,11 @@ struct device {
 struct virtio_device {
 	struct device dev;
 	u64 features;
+	struct list_head vqs;
 };
 
 struct virtqueue {
-	/* TODO: commented as list macros are empty stubs for now.
-	 * Broken but enough for virtio_ring.c
-	 * struct list_head list; */
+	struct list_head list;
 	void (*callback)(struct virtqueue *vq);
 	const char *name;
 	struct virtio_device *vdev;
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index b427def67e7e..cb3f29c09aff 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #define _GNU_SOURCE
 #include
+#include
 #include
 #include
 #include
@@ -18,6 +19,8 @@
 #include
 #include "../../drivers/vhost/test.h"
 
+#define RANDOM_BATCH -1
+
 /* Unused */
 void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
 
@@ -43,6 +46,10 @@ struct vdev_info {
 	struct vhost_memory *mem;
 };
 
+static const struct vhost_vring_file no_backend = { .fd = -1 },
+				     backend = { .fd = 1 };
+static const struct vhost_vring_state null_state = {};
+
 bool vq_notify(struct virtqueue *vq)
 {
 	struct vq_info *info = vq->priv;
@@ -88,6 +95,19 @@ void vhost_vq_setup(struct vdev_info *dev, struct vq_info *info)
 	assert(r >= 0);
 }
 
+static void vq_reset(struct vq_info *info, int num, struct virtio_device *vdev)
+{
+	if (info->vq)
+		vring_del_virtqueue(info->vq);
+
+	memset(info->ring, 0, vring_size(num, 4096));
+	vring_init(&info->vring, num, info->ring, 4096);
+	info->vq = __vring_new_virtqueue(info->idx, info->vring, vdev, true,
+					 false, vq_notify, vq_callback, "test");
+	assert(info->vq);
+	info->vq->priv = info;
+}
+
 static void vq_info_add(struct vdev_info *dev, int num)
 {
 	struct vq_info *info = &dev->vqs[dev->nvqs];
@@ -97,14 +117,7 @@ static void vq_info_add(struct vdev_info *dev, int num)
 	info->call = eventfd(0, EFD_NONBLOCK);
 	r = posix_memalign(&info->ring, 4096, vring_size(num, 4096));
 	assert(r >= 0);
-	memset(info->ring, 0, vring_size(num, 4096));
-	vring_init(&info->vring, num, info->ring, 4096);
-	info->vq = vring_new_virtqueue(info->idx,
-				       info->vring.num, 4096, &dev->vdev,
-				       true, false, info->ring,
-				       vq_notify, vq_callback, "test");
-	assert(info->vq);
-	info->vq->priv = info;
+	vq_reset(info, num, &dev->vdev);
 	vhost_vq_setup(dev, info);
 	dev->fds[info->idx].fd = info->call;
 	dev->fds[info->idx].events = POLLIN;
@@ -116,6 +129,7 @@ static void vdev_info_init(struct vdev_info* dev, unsigned long long features)
 	int r;
 	memset(dev, 0, sizeof *dev);
 	dev->vdev.features = features;
+	INIT_LIST_HEAD(&dev->vdev.vqs);
 	dev->buf_size = 1024;
 	dev->buf = malloc(dev->buf_size);
 	assert(dev->buf);
@@ -152,41 +166,93 @@ static void wait_for_interrupt(struct vdev_info *dev)
 }
 
 static void run_test(struct vdev_info *dev, struct vq_info *vq,
-		     bool delayed, int bufs)
+		     bool delayed, int batch, int reset_n, int bufs)
 {
 	struct scatterlist sl;
-	long started = 0, completed = 0;
-	long completed_before;
+	long started = 0, completed = 0, next_reset = reset_n;
+	long completed_before, started_before;
 	int r, test = 1;
 	unsigned len;
 	long long spurious = 0;
+	const bool random_batch = batch == RANDOM_BATCH;
+
 	r = ioctl(dev->control, VHOST_TEST_RUN, &test);
 	assert(r >= 0);
+	if (!reset_n) {
+		next_reset = INT_MAX;
+	}
+
 	for (;;) {
 		virtqueue_disable_cb(vq->vq);
 		completed_before = completed;
+		started_before = started;
 		do {
-			if (started < bufs) {
+			const bool reset = completed > next_reset;
+			if (random_batch)
+				batch = (random() % vq->vring.num) + 1;
+
+			while (started < bufs &&
+			       (started - completed) < batch) {
 				sg_init_one(&sl, dev->buf, dev->buf_size);
 				r = virtqueue_add_outbuf(vq->vq, &sl, 1,
							 dev->buf + started,
							 GFP_ATOMIC);
-				if (likely(r == 0)) {
-					++started;
-					if (unlikely(!virtqueue_kick(vq->vq)))
+				if (unlikely(r != 0)) {
+					if (r == -ENOSPC &&
+					    started > started_before)
+						r = 0;
+					else
 						r = -1;
+					break;
 				}
-			} else
+
+				++started;
+
+				if (unlikely(!virtqueue_kick(vq->vq))) {
+					r = -1;
+					break;
+				}
+			}
+
+			if (started >= bufs)
 				r = -1;
+
+			if (reset) {
+				r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
+					  &no_backend);
+				assert(!r);
+			}
+
 			/* Flush out completed bufs if any */
-			if (virtqueue_get_buf(vq->vq, &len)) {
+			while (virtqueue_get_buf(vq->vq, &len)) {
 				++completed;
 				r = 0;
 			}
 
+			if (reset) {
+				struct vhost_vring_state s = { .index = 0 };
+
+				vq_reset(vq, vq->vring.num, &dev->vdev);
+
+				r = ioctl(dev->control, VHOST_GET_VRING_BASE,
+					  &s);
+				assert(!r);
+
+				s.num = 0;
+				r = ioctl(dev->control, VHOST_SET_VRING_BASE,
+					  &null_state);
+				assert(!r);
+
+				r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
+					  &backend);
+				assert(!r);
+
+				started = completed;
+				while (completed > next_reset)
+					next_reset += completed;
+			}
 		} while (r == 0);
-		if (completed == completed_before)
+		if (completed == completed_before && started == started_before)
 			++spurious;
 		assert(completed <= bufs);
 		assert(started <= bufs);
@@ -203,7 +269,9 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
 	test = 0;
 	r = ioctl(dev->control, VHOST_TEST_RUN, &test);
 	assert(r >= 0);
-	fprintf(stderr, "spurious wakeups: 0x%llx\n", spurious);
+	fprintf(stderr,
+		"spurious wakeups: 0x%llx started=0x%lx completed=0x%lx\n",
+		spurious, started, completed);
 }
 
 const char optstring[] = "h";
@@ -244,6 +312,16 @@ const struct option longopts[] = {
 		.name = "no-delayed-interrupt",
 		.val = 'd',
 	},
+	{
+		.name = "batch",
+		.val = 'b',
+		.has_arg = required_argument,
+	},
+	{
+		.name = "reset",
+		.val = 'r',
+		.has_arg = optional_argument,
+	},
 	{
 	}
 };
@@ -255,6 +333,8 @@ static void help(void)
 		" [--no-event-idx]"
 		" [--no-virtio-1]"
 		" [--delayed-interrupt]"
+		" [--batch=random/N]"
+		" [--reset=N]"
 		"\n");
 }
 
@@ -263,6 +343,7 @@ int main(int argc, char **argv)
 	struct vdev_info dev;
 	unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
 		(1ULL << VIRTIO_RING_F_EVENT_IDX) | (1ULL << VIRTIO_F_VERSION_1);
+	long batch = 1, reset = 0;
 	int o;
 	bool delayed = false;
 
@@ -289,6 +370,24 @@ int main(int argc, char **argv)
 		case 'D':
 			delayed = true;
 			break;
+		case 'b':
+			if (0 == strcmp(optarg, "random")) {
+				batch = RANDOM_BATCH;
+			} else {
+				batch = strtol(optarg, NULL, 10);
+				assert(batch > 0);
+				assert(batch < (long)INT_MAX + 1);
+			}
+			break;
+		case 'r':
+			if (!optarg) {
+				reset = 1;
+			} else {
+				reset = strtol(optarg, NULL, 10);
+				assert(reset > 0);
+				assert(reset < (long)INT_MAX + 1);
+			}
+			break;
 		default:
 			assert(0);
 			break;
@@ -298,6 +397,6 @@ int main(int argc, char **argv)
 done:
 	vdev_info_init(&dev, features);
 	vq_info_add(&dev, 256);
-	run_test(&dev, &dev.vqs[0], delayed, 0x100000);
+	run_test(&dev, &dev.vqs[0], delayed, batch, reset, 0x100000);
 	return 0;
 }
diff --git a/tools/virtio/vringh_test.c b/tools/virtio/vringh_test.c
index 293653463303..fa87b58bd5fa 100644
--- a/tools/virtio/vringh_test.c
+++ b/tools/virtio/vringh_test.c
@@ -307,6 +307,7 @@ static int parallel_test(u64 features,
 	close(to_host[0]);
 
 	gvdev.vdev.features = features;
+	INIT_LIST_HEAD(&gvdev.vdev.vqs);
 	gvdev.to_host_fd = to_host[1];
 	gvdev.notifies = 0;
 
@@ -453,6 +454,7 @@ int main(int argc, char *argv[])
 		getrange = getrange_iov;
 
 	vdev.features = 0;
+	INIT_LIST_HEAD(&vdev.vqs);
 
 	while (argv[1]) {
 		if (strcmp(argv[1], "--indirect") == 0)